/*
 * iPXE driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Derived from Linux skge driver (v1.13), which was
 * based on earlier sk98lin, e100 and FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features of the
 * original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * Modified for iPXE, July 2008 by Michael Decker <mrd999@gmail.com>
 * Tested and Modified in December 2009 by
 *    Thomas Miletich <thomas.miletich@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_ONLY );

#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>

#include "skge.h"

static struct pci_device_id skge_id_table[] = {
	PCI_ROM(0x10b7, 0x1700,     "3C940",     "3COM 3C940", 0),
	PCI_ROM(0x10b7, 0x80eb,     "3C940B",    "3COM 3C940B", 0),
	PCI_ROM(0x1148, 0x4300,     "GE",        "Syskonnect GE", 0),
	PCI_ROM(0x1148, 0x4320,     "YU",        "Syskonnect YU", 0),
	PCI_ROM(0x1186, 0x4C00,     "DGE510T",   "DLink DGE-510T", 0),
	PCI_ROM(0x1186, 0x4b01,     "DGE530T",   "DLink DGE-530T", 0),
	PCI_ROM(0x11ab, 0x4320,     "id4320",    "Marvell id4320", 0),
	PCI_ROM(0x11ab, 0x5005,     "id5005",    "Marvell id5005", 0), /* Belkin */
	PCI_ROM(0x1371, 0x434e,     "Gigacard",  "CNET Gigacard", 0),
	PCI_ROM(0x1737, 0x1064,     "EG1064",    "Linksys EG1064", 0),
	PCI_ROM(0x1737, 0xffff,     "id_any",    "Linksys [any]", 0)
};

static int skge_up(struct net_device *dev);
static void skge_down(struct net_device *dev);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);

static void skge_phyirq(struct skge_hw *hw);
static void skge_poll(struct net_device *dev);
static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob);
static void skge_net_irq ( struct net_device *dev, int enable );

static void skge_rx_refill(struct net_device *dev);

static struct net_device_operations skge_operations = {
	.open     = skge_up,
	.close    = skge_down,
	.transmit = skge_xmit_frame,
	.poll     = skge_poll,
	.irq      = skge_net_irq
};

/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
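
/* For example, rxqaddr[port] maps port 0 to Q_R1 and port 1 to Q_R2,
 * so per-port queue setup can index these tables instead of branching
 * on the port number.
 */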

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
	u32 supported;

	if (hw->copper) {
		supported = SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full
			| SUPPORTED_1000baseT_Half
			| SUPPORTED_1000baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP;

		if (hw->chip_id == CHIP_ID_GENESIS)
			supported &= ~(SUPPORTED_10baseT_Half
					     | SUPPORTED_10baseT_Full
					     | SUPPORTED_100baseT_Half
					     | SUPPORTED_100baseT_Full);

		else if (hw->chip_id == CHIP_ID_YUKON)
			supported &= ~SUPPORTED_1000baseT_Half;
	} else
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
			| SUPPORTED_FIBRE | SUPPORTED_Autoneg;

	return supported;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}
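
/* Worked example: on Yukon the clock runs at 78125 kHz, so a 100us
 * interval converts to 78125 * 100 / 1000 = 7812 clock ticks; on
 * Genesis (53125 kHz) the same interval is 5312 ticks.
 */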

enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id == CHIP_ID_GENESIS) {
		switch (mode) {
		case LED_MODE_OFF:
			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
			else {
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
			}
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
			break;

		case LED_MODE_ON:
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

			break;

		case LED_MODE_TST:
			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
			else {
				skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
			}

		}
	} else {
		switch (mode) {
		case LED_MODE_OFF:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
				     PHY_M_LED_MO_10(MO_LED_OFF)   |
				     PHY_M_LED_MO_100(MO_LED_OFF)  |
				     PHY_M_LED_MO_1000(MO_LED_OFF) |
				     PHY_M_LED_MO_RX(MO_LED_OFF));
			break;
		case LED_MODE_ON:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
				     PHY_M_LED_PULS_DUR(PULS_170MS) |
				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
				     PHY_M_LEDC_TX_CTRL |
				     PHY_M_LEDC_DP_CTRL);

			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_RX(MO_LED_OFF) |
				     (skge->speed == SPEED_100 ?
				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
			break;
		case LED_MODE_TST:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_ON)  |
				     PHY_M_LED_MO_10(MO_LED_ON)   |
				     PHY_M_LED_MO_100(MO_LED_ON)  |
				     PHY_M_LED_MO_1000(MO_LED_ON) |
				     PHY_M_LED_MO_RX(MO_LED_ON));
		}
	}
}

/*
 * I've left in these EEPROM and VPD functions, as someone may desire to
 * integrate them in the future. -mdeck
 *
 * static int skge_get_eeprom_len(struct net_device *dev)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	u32 reg2;
 *
 * 	pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
 * 	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
 * }
 *
 * static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
 * {
 * 	u32 val;
 *
 * 	pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
 *
 * 	do {
 * 		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
 * 	} while (!(offset & PCI_VPD_ADDR_F));
 *
 * 	pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
 * 	return val;
 * }
 *
 * static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
 * {
 * 	pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
 * 	pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
 * 			      offset | PCI_VPD_ADDR_F);
 *
 * 	do {
 * 		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
 * 	} while (offset & PCI_VPD_ADDR_F);
 * }
 *
 * static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 * 			   u8 *data)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	struct pci_dev *pdev = skge->hw->pdev;
 * 	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
 * 	int length = eeprom->len;
 * 	u16 offset = eeprom->offset;
 *
 * 	if (!cap)
 * 		return -EINVAL;
 *
 * 	eeprom->magic = SKGE_EEPROM_MAGIC;
 *
 * 	while (length > 0) {
 * 		u32 val = skge_vpd_read(pdev, cap, offset);
 * 		int n = min_t(int, length, sizeof(val));
 *
 * 		memcpy(data, &val, n);
 * 		length -= n;
 * 		data += n;
 * 		offset += n;
 * 	}
 * 	return 0;
 * }
 *
 * static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 * 			   u8 *data)
 * {
 * 	struct skge_port *skge = netdev_priv(dev);
 * 	struct pci_dev *pdev = skge->hw->pdev;
 * 	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
 * 	int length = eeprom->len;
 * 	u16 offset = eeprom->offset;
 *
 * 	if (!cap)
 * 		return -EINVAL;
 *
 * 	if (eeprom->magic != SKGE_EEPROM_MAGIC)
 * 		return -EINVAL;
 *
 * 	while (length > 0) {
 * 		u32 val;
 * 		int n = min_t(int, length, sizeof(val));
 *
 * 		if (n < sizeof(val))
 * 			val = skge_vpd_read(pdev, cap, offset);
 * 		memcpy(&val, data, n);
 *
 * 		skge_vpd_write(pdev, cap, offset, val);
 *
 * 		length -= n;
 * 		data += n;
 * 		offset += n;
 * 	}
 * 	return 0;
 * }
 */

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base,
                           size_t num)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	unsigned int i;

	ring->start = zalloc(num * sizeof(*e));
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < num; i++, e++, d++) {
		e->desc = d;
		if (i == num - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}
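
/* Illustration (not part of the driver): with num == 4 descriptors the
 * next_offset chain is base+1*sizeof(*d), base+2*sizeof(*d),
 * base+3*sizeof(*d) and finally base again, so the hardware walks the
 * ring endlessly without any end-of-list marker.
 */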

/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge __unused,
			  struct skge_element *e,
			  struct io_buffer *iob, unsigned int bufsize)
{
	struct skge_rx_desc *rd = e->desc;
	u64 map;

	map = ( iob != NULL ) ? virt_to_bus(iob->data) : 0;

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->iob = iob;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;
	rd->csum1 = 0;
	rd->csum2 = 0;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
}

/* Resume receiving using existing io_buffer,
 * Note: DMA address is not changed by chip.
 * 	 MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
	struct skge_rx_desc *rd = e->desc;

	rd->csum2 = 0;
	rd->csum2_start = ETH_HLEN;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}


/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;
		if (e->iob) {
			free_iob(e->iob);
			e->iob = NULL;
		}
	} while ((e = e->next) != ring->start);
}

static void skge_link_up(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);

	netdev_link_up(skge->netdev);

	DBG2(PFX "%s: Link is up at %d Mbps, %s duplex\n",
	     skge->netdev->name, skge->speed,
	     skge->duplex == DUPLEX_FULL ? "full" : "half");
}

static void skge_link_down(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	netdev_link_down(skge->netdev);

	DBG2(PFX "%s: Link is down.\n", skge->netdev->name);
}


static void xm_link_down(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);

	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);

	if (netdev_link_ok(dev))
		skge_link_down(skge);
}

static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	if (hw->phy_type == SK_PHY_XMAC)
		goto ready;

	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
		udelay(1);
	}

	return -ETIMEDOUT;
 ready:
	*val = xm_read16(hw, port, XM_PHY_DATA);

	return 0;
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;
	if (__xm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timed out\n",
		       hw->dev[port]->name);
	return v;
}

static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	return -EIO;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
	const u8 zero[8] = { 0 };
	u32 reg;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	if (hw->phy_type == SK_PHY_BCOM)
		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	xm_outhash(hw, port, XM_HSM, zero);

	/* Flush TX and RX fifo */
	reg = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}


/* Convert mode to MII values */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] =	0,
	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
	[FLOW_MODE_NONE]	= PHY_X_P_NO_PAUSE,
	[FLOW_MODE_LOC_SEND]	= PHY_X_P_ASYM_MD,
	[FLOW_MODE_SYMMETRIC]	= PHY_X_P_SYM_MD,
	[FLOW_MODE_SYM_OR_REM]	= PHY_X_P_BOTH_MD,
};


/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, aux;

		if (!(status & PHY_ST_AN_OVER))
			return;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",
			       dev->name);
			return;
		}

		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

		/* Check Duplex mismatch */
		switch (aux & PHY_B_AS_AN_RES_MSK) {
		case PHY_B_RES_1000FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_B_RES_1000HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			DBG(PFX "%s: duplex mismatch\n",
			       dev->name);
			return;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (aux & PHY_B_AS_PAUSE_MSK) {
		case PHY_B_AS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_B_AS_PRR:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_B_AS_PRT:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}
		skge->speed = SPEED_1000;
	}

	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
}

/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
 * PHY on for 100 or 10 Mbit operation
 */
static void bcom_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |= XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD, r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);
		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}

static void xm_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl = 0;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			ctrl |= PHY_X_AN_HD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			ctrl |= PHY_X_AN_FD;

		ctrl |= fiber_pause_map[skge->flow_control];

		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

		/* Restart Auto-negotiation */
		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* Set DuplexMode in Config register */
		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;
		/*
		 * Do NOT enable Auto-negotiation here. This would hold
		 * the link down because no IDLEs are transmitted
		 */
	}

	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

	/* Poll PHY for status changes */
	skge->use_xm_link_timer = 1;
}

static int xm_check_link(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_XMAC_STAT);
	status = xm_phy_read(hw, port, PHY_XMAC_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return 0;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, res;

		if (!(status & PHY_ST_AN_OVER))
			return 0;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			DBG(PFX "%s: remote fault\n",
			       dev->name);
			return 0;
		}

		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

		/* Check Duplex mismatch */
		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
		case PHY_X_RS_FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_X_RS_HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			DBG(PFX "%s: duplex mismatch\n",
			       dev->name);
			return 0;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
		    (lpa & PHY_X_P_SYM_MD))
			skge->flow_status = FLOW_STAT_SYMMETRIC;
		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
			/* Enable PAUSE receive, disable PAUSE transmit */
			skge->flow_status = FLOW_STAT_REM_SEND;
		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
			/* Disable PAUSE receive, enable PAUSE transmit */
			skge->flow_status = FLOW_STAT_LOC_SEND;
		else
			skge->flow_status = FLOW_STAT_NONE;

		skge->speed = SPEED_1000;
	}

	if (!netdev_link_ok(dev))
		genesis_link_up(skge);
	return 1;
}

/* Poll to check for link coming up.
 *
 * Since internal PHY is wired to a level triggered pin, can't
 * get an interrupt when carrier is detected, need to poll for
 * link coming up.
 */
static void xm_link_timer(struct skge_port *skge)
{
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	/*
	 * Verify the link by checking the GPIO register three times.
	 * This pin has the signal from the link_sync pin connected to it.
	 */
	for (i = 0; i < 3; i++) {
		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
			return;
	}

	/* Re-enable interrupt to detect link down */
	if (xm_check_link(dev)) {
		u16 msk = xm_read16(hw, port, XM_IMSK);
		msk &= ~XM_IS_INP_ASS;
		xm_write16(hw, port, XM_IMSK, msk);
		xm_read16(hw, port, XM_ISRC);
	}
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int i;
	u32 r;
	const u8 zero[6] = { 0 };

	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	DBG(PFX "%s: genesis reset failed\n", dev->name);

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take external Phy out of reset */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);

		/* Enable GMII interface */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
	}


	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		xm_phy_init(skge);
		break;
	case SK_PHY_BCOM:
		bcom_phy_init(skge);
		bcom_check_link(hw, port);
	}

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->ll_addr);

	/* We don't use match addresses so clear */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 * on frames received
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* enable timeout timers */
	skge_write16(hw, B3_PA_CTRL,
		     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
}

static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	unsigned retries = 1000;
	u16 cmd;

	/* Disable Tx and Rx */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	xm_write16(hw, port, XM_MMU_CMD, cmd);

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/* Reset the MAC */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
	do {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
		if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
			break;
	} while (--retries > 0);

	/* For external PHYs there must be special handling */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		if (port == 0) {
			reg |= GP_DIR_0;
			reg &= ~GP_IO_0;
		} else {
			reg |= GP_DIR_2;
			reg &= ~GP_IO_2;
		}
		skge_write32(hw, B2_GP_IO, reg);
		skge_read32(hw, B2_GP_IO);
	}

	xm_write16(hw, port, XM_MMU_CMD,
			xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	xm_read16(hw, port, XM_MMU_CMD);
}

static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd, msk;
	u32 mode;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_status == FLOW_STAT_NONE ||
	    skge->flow_status == FLOW_STAT_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
	    skge->flow_status == FLOW_STAT_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disabling pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	/* Turn on detection of Tx underrun */
	msk = xm_read16(hw, port, XM_IMSK);
	msk &= ~XM_IS_TXF_UR;
	xm_write16(hw, port, XM_IMSK, msk);

	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	if (hw->phy_type == SK_PHY_BCOM) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
			     & ~PHY_B_AC_DIS_PM);
		xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
	}

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}


static inline void bcom_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 isrc;

	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	DBGIO(PFX "%s: phy interrupt status 0x%x\n",
	     skge->netdev->name, isrc);

	if (isrc & PHY_B_IS_PSE)
		DBG(PFX "%s: uncorrectable pair swap error\n",
		    hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl & ~PHY_CT_LOOP);
	}

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);

}

static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			return 0;
	}

	DBG(PFX "%s: phy write timeout port %x reg %x val %x\n",
	    hw->dev[port]->name,
	    port, reg, val);
	return -EIO;
}

static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr)
			 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	return -ETIMEDOUT;
 ready:
	*val = gma_read16(hw, port, GM_SMI_DATA);
	return 0;
}

static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;
	if (__gm_phy_read(hw, port, reg, &v))
		DBG(PFX "%s: phy read timeout port %x reg %x val %x\n",
	       hw->dev[port]->name,
	       port, reg, v);
	return v;
}

/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			  PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			/* Set Flow-control capabilities */
			adv |= phy_pause_map[skge->flow_control];
		} else {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;

			adv |= fiber_pause_map[skge->flow_control];
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}

static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
			 gma_read16(hw, port, GM_RX_CTRL)
			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}

/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
static int is_yukon_lite_a0(struct skge_hw *hw)
{
	u32 reg;
	int ret;

	if (hw->chip_id != CHIP_ID_YUKON)
		return 0;

	reg = skge_read32(hw, B2_FAR);
	skge_write8(hw, B2_FAR + 3, 0xff);
	ret = (skge_read8(hw, B2_FAR + 3) != 0);
	skge_write32(hw, B2_FAR, reg);
	return ret;
}

static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->ll_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
				 gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_SYMMETRIC:
	case FLOW_MODE_SYM_OR_REM:
		/* enable Tx & Rx flow-control */
		break;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	yukon_init(hw, port);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* configure the Serial Mode Register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
		| GM_SMOD_VLAN_ENA
		| IPG_DATA_VAL(IPG_DATA_DEF);

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* clear interrupt masks for counter overflows (no IRQs taken) */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}

/* Go into power down mode */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}

static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}

static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
{
	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}

static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_status == FLOW_STAT_REM_SEND) {
		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
		ctrl |= PHY_M_AN_ASP;
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
	}

	skge_link_down(skge);

	yukon_init(hw, port);
}

static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
	     skge->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}

		if (skge->flow_status == FLOW_STAT_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
 failed:
	DBG(PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}

static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queues */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queues because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
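
/* Worked example (illustrative only): a 64 KB receive partition at RAM
 * offset 0 gives start = 0 and len = 8192 in 8-byte qwords, so the
 * upper threshold lands at qword 5461 (2/3 full) and the lower one at
 * qword 2730 (1/3 full).
 */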

/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
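
/* Note: the 64-bit descriptor base is split across Q_DA_H and Q_DA_L.
 * skge_up() below rejects rings that would cross a 4 GB boundary, so
 * Q_DA_H never needs to change while the BMU walks the ring.
 */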

void skge_free(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	free(skge->rx_ring.start);
	skge->rx_ring.start = NULL;

	free(skge->tx_ring.start);
	skge->tx_ring.start = NULL;

	free_dma(skge->mem, RING_SIZE);
	skge->mem = NULL;
	skge->dma = 0;
}
1706 
skge_up(struct net_device * dev)1707 static int skge_up(struct net_device *dev)
1708 {
1709 	struct skge_port *skge = netdev_priv(dev);
1710 	struct skge_hw *hw = skge->hw;
1711 	int port = skge->port;
1712 	u32 chunk, ram_addr;
1713 	int err;
1714 
1715 	DBG2(PFX "%s: enabling interface\n", dev->name);
1716 
1717 	skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
1718 	skge->dma = virt_to_bus(skge->mem);
1719 	if (!skge->mem)
1720 		return -ENOMEM;
1721 	memset(skge->mem, 0, RING_SIZE);
1722 
1723 	assert(!(skge->dma & 7));
1724 
1725 	/* FIXME: find out whether 64 bit iPXE will be loaded > 4GB */
1726 	if ((u64)skge->dma >> 32 != ((u64) skge->dma + RING_SIZE) >> 32) {
1727 		DBG(PFX "pci_alloc_consistent region crosses 4G boundary\n");
1728 		err = -EINVAL;
1729 		goto err;
1730 	}

	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma, NUM_RX_DESC);
	if (err)
		goto err;

	/* This call relies on e->iob and rd->control being 0, which is
	 * assured by calling memset() on skge->mem and using zalloc()
	 * for the skge_element structures.
	 */
	skge_rx_refill(dev);

	err = skge_ring_alloc(&skge->tx_ring, skge->mem + RX_RING_SIZE,
			      skge->dma + RX_RING_SIZE, NUM_TX_DESC);
	if (err)
		goto err;

	/* Initialize MAC */
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);

	/* Configure RAM buffers - split equally between ports and tx/rx */
	chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
	ram_addr = hw->ram_offset + 2 * chunk * port;
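
	/* Example (hypothetical sizes): with ram_size = 0x100000,
	 * ram_offset = 0 and two ports, chunk = 0x100000 / 4 = 0x40000
	 * bytes per queue; port 0 gets rx at 0x0 and tx at 0x40000,
	 * port 1 rx at 0x80000 and tx at 0xc0000.
	 */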

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	assert(skge->tx_ring.to_use == skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr + chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
	skge_led(skge, LED_MODE_ON);

	hw->intr_mask |= portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	return 0;

 err:
	skge_rx_clean(skge);
	skge_free(dev);

	return err;
}

/* stop receiver */
static void skge_rx_stop(struct skge_hw *hw, int port)
{
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
}

static void skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->mem == NULL)
		return;

	DBG2(PFX "%s: disabling interface\n", dev->name);

	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
		skge->use_xm_link_timer = 0;

	netdev_link_down(dev);

	hw->intr_mask &= ~portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);

	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	skge_rx_stop(hw, port);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	skge_led(skge, LED_MODE_OFF);

	skge_tx_clean(dev);

	skge_rx_clean(skge);

	skge_free(dev);
}

static inline int skge_tx_avail(const struct skge_ring *ring)
{
	mb();
	return ((ring->to_clean > ring->to_use) ? 0 : NUM_TX_DESC)
		+ (ring->to_clean - ring->to_use) - 1;
}
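
/* Example: when to_clean == to_use (an empty ring) this evaluates to
 * (0 + NUM_TX_DESC) + 0 - 1 = NUM_TX_DESC - 1; one slot is always kept
 * unused so that a completely full ring can be distinguished from a
 * completely empty one.
 */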

static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_element *e;
	struct skge_tx_desc *td;
	u32 control, len;
	u64 map;

	if (skge_tx_avail(&skge->tx_ring) < 1)
		return -EBUSY;

	e = skge->tx_ring.to_use;
	td = e->desc;
	assert(!(td->control & BMU_OWN));
	e->iob = iob;
	len = iob_len(iob);
	map = virt_to_bus(iob->data);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	control = BMU_CHECK | BMU_EOF | BMU_IRQ_EOF;

	/* Make sure all the descriptor fields are written before
	 * handing ownership to the hardware */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	DBGIO(PFX "%s: tx queued, slot %td, len %d\n",
	      dev->name, e - skge->tx_ring.start, (unsigned int)len);

	skge->tx_ring.to_use = e->next;
	wmb();

	if (skge_tx_avail(&skge->tx_ring) <= 1) {
		DBG(PFX "%s: transmit queue full\n", dev->name);
	}

	return 0;
}
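
/* A note on the control word assembled above: the frame length occupies
 * the low bits (the same BMU_BBC field that the receive path extracts
 * with "control & BMU_BBC"), while BMU_OWN, BMU_SW, BMU_STF, BMU_EOF,
 * BMU_IRQ_EOF and BMU_CHECK are individual flag bits in the upper part
 * of the word. Setting BMU_OWN last, behind a write barrier, hands the
 * fully-initialized descriptor to the hardware in one shot.
 */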

/* Free all buffers in transmit ring */
static void skge_tx_clean(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		td->control = 0;
	}

	skge->tx_ring.to_clean = e;
}

static inline u16 phy_length(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return status >> XMR_FS_LEN_SHIFT;
	else
		return status >> GMR_FS_LEN_SHIFT;
}

static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
	else
		return (status & GMR_FS_ANY_ERR) ||
			(status & GMR_FS_RX_OK) == 0;
}

/* Free all buffers in Tx ring which are no longer owned by device */
static void skge_tx_done(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		u32 control = ((const struct skge_tx_desc *) e->desc)->control;

		if (control & BMU_OWN)
			break;

		netdev_tx_complete(dev, e->iob);
	}
	skge->tx_ring.to_clean = e;

	/* Can run lockless until we need to synchronize to restart queue. */
	mb();
}

static void skge_rx_refill(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	struct io_buffer *iob;
	struct skge_rx_desc *rd;
	u32 control;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		e = ring->to_clean;
		rd = e->desc;
		iob = e->iob;
		control = rd->control;

		/* nothing to do here */
		if (iob || (control & BMU_OWN))
			continue;

		DBG2("refilling rx desc %zd: ", (ring->to_clean - ring->start));

		iob = alloc_iob(RX_BUF_SIZE);
		if (iob) {
			skge_rx_setup(skge, e, iob, RX_BUF_SIZE);
		} else {
			DBG("descr %zd: alloc_iob() failed\n",
			     (ring->to_clean - ring->start));
			/* We pass the descriptor to the NIC even if the
			 * allocation failed. The card will stop as soon as it
			 * encounters a descriptor with the OWN bit set to 0,
			 * thus never getting to the next descriptor that might
			 * contain a valid io_buffer. This would effectively
			 * stall the receive.
			 */
			skge_rx_setup(skge, e, NULL, 0);
		}

		ring->to_clean = e->next;
	}
}

static void skge_rx_done(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_rx_desc *rd;
	struct skge_element *e;
	struct io_buffer *iob;
	u32 control;
	u16 len;
	int i;

	e = ring->to_clean;
	for (i = 0; i < NUM_RX_DESC; i++) {
		iob = e->iob;
		rd = e->desc;

		rmb();
		control = rd->control;

		if (control & BMU_OWN)
			break;

		if (!iob)
			continue;

		len = control & BMU_BBC;

		/* catch RX errors */
		if (bad_phy_status(skge->hw, rd->status) ||
		    phy_length(skge->hw, rd->status) != len) {
			/* report receive errors */
			DBG("rx error\n");
			netdev_rx_err(dev, iob, -EIO);
		} else {
			DBG2("received packet, len %d\n", len);
			iob_put(iob, len);
			netdev_rx(dev, iob);
		}

		/* io_buffer passed to core, make sure we don't reuse it */
		e->iob = NULL;

		e = e->next;
	}
	skge_rx_refill(dev);
}

static void skge_poll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	u32 status;

	/* reading this register ACKs interrupts */
	status = skge_read32(hw, B0_SP_ISRC);

	/* Link event? */
	if (status & IS_EXT_REG) {
		skge_phyirq(hw);
		if (skge->use_xm_link_timer)
			xm_link_timer(skge);
	}

	skge_tx_done(dev);

	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	skge_rx_done(dev);

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	skge_read32(hw, B0_IMSK);
}

static void skge_phyirq(struct skge_hw *hw)
{
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *dev = hw->dev[port];
		struct skge_port *skge = netdev_priv(dev);

		if (hw->chip_id != CHIP_ID_GENESIS)
			yukon_phy_intr(skge);
		else if (hw->phy_type == SK_PHY_BCOM)
			bcom_phy_intr(skge);
	}

	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
}

static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite" },
	{ CHIP_ID_YUKON_LP,	"Yukon-LP" },
};

static const char *skge_board_name(const struct skge_hw *hw)
{
	unsigned int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}

/*
 * Setup the board data structure, but don't bring up
 * the port(s)
 */
static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			DBG(PFX "unsupported phy type 0x%x\n",
			       hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		DBG(PFX "unsupported chip type 0x%x\n",
		       hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapter's RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use PHY IRQ for all but fiber-based Genesis boards */
	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (workaround for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			DBG(PFX "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize RAM interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only, since iPXE
	 * handles receive by polling
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);
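
	/* skge_usecs2clk() converts the 100us moderation interval above
	 * into core clock ticks for B2_IRQM_INI; assuming a core clock
	 * of about 53MHz (Genesis), 100us would come out to roughly
	 * 5300 ticks.
	 */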

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}

	return 0;
}

/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem __unused)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		DBG(PFX "etherdev alloc failed\n");
		return NULL;
	}

	dev->dev = &hw->pdev->dev;

	skge = netdev_priv(dev);
	skge->netdev = dev;
	skge->hw = hw;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	hw->dev[port] = dev;

	skge->port = port;

	/* read the mac address */
	memcpy(dev->hw_addr, (void *) (hw->regs + B2_MAC_1 + port*8), ETH_ALEN);

	return dev;
}

static void skge_show_addr(struct net_device *dev)
{
	DBG2(PFX "%s: addr %s\n",
	     dev->name, netdev_addr(dev));
}

static int skge_probe(struct pci_device *pdev)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	adjust_pci_device(pdev);

	err = -ENOMEM;
	hw = zalloc(sizeof(*hw));
	if (!hw) {
		DBG(PFX "cannot allocate hardware struct\n");
		goto err_out_free_regions;
	}

	hw->pdev = pdev;

	hw->regs = (unsigned long)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0),
					  SKGE_REG_SIZE);
	if (!hw->regs) {
		DBG(PFX "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = skge_reset(hw);
	if (err)
		goto err_out_iounmap;

	DBG(PFX " addr 0x%llx irq %d chip %s rev %d\n",
	    (unsigned long long)pdev->ioaddr, pdev->irq,
	    skge_board_name(hw), hw->chip_rev);

	dev = skge_devinit(hw, 0, using_dac);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_led_off;
	}

	netdev_init ( dev, &skge_operations );

	err = register_netdev(dev);
	if (err) {
		DBG(PFX "cannot register net device\n");
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
		if (register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			DBG(PFX "register of second port failed\n");
			hw->dev[1] = NULL;
			netdev_nullify(dev1);
			netdev_put(dev1);
		}
	}
	pci_set_drvdata(pdev, hw);

	return 0;

err_out_free_netdev:
	netdev_nullify(dev);
	netdev_put(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_iounmap:
	iounmap((void*)hw->regs);
err_out_free_hw:
	free(hw);
err_out_free_regions:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void skge_remove(struct pci_device *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	if ((dev1 = hw->dev[1]))
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	hw->intr_mask = 0;
	skge_write32(hw, B0_IMSK, 0);
	skge_read32(hw, B0_IMSK);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	if (dev1) {
		netdev_nullify(dev1);
		netdev_put(dev1);
	}
	netdev_nullify(dev0);
	netdev_put(dev0);

	iounmap((void*)hw->regs);
	free(hw);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Enable or disable IRQ masking.
 *
 * @v netdev		Device to control.
 * @v enable		Zero to mask off IRQ, non-zero to enable IRQ.
 *
 * This is an iPXE Network Driver API function.
 */
static void skge_net_irq ( struct net_device *dev, int enable ) {
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (enable)
		hw->intr_mask |= portmask[skge->port];
	else
		hw->intr_mask &= ~portmask[skge->port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
}

struct pci_driver skge_driver __pci_driver = {
	.ids      = skge_id_table,
	.id_count = ( sizeof (skge_id_table) / sizeof (skge_id_table[0]) ),
	.probe    = skge_probe,
	.remove   = skge_remove
};