1 /*
2 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
3 *
4 * Derived from Intel e1000 driver
5 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 *
7 * Modified for iPXE, October 2009 by Joshua Oreman <oremanj@rwcr.net>.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 51
21 * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER );
25
26 #include "atl1e.h"
27
28 /* User-tweakable parameters: */
29 #define TX_DESC_COUNT 32 /* TX descriptors, minimum 32 */
30 #define RX_MEM_SIZE 8192 /* RX area size, minimum 8KB */
31 #define MAX_FRAME_SIZE 1500 /* Maximum MTU supported, minimum 1500 */
32
33 /* Arcane parameters: */
34 #define PREAMBLE_LEN 7
35 #define RX_JUMBO_THRESH ((MAX_FRAME_SIZE + ETH_HLEN + \
36 VLAN_HLEN + ETH_FCS_LEN + 7) >> 3)
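/* Note: the early-TX / jumbo threshold registers are programmed in 8-byte
* (QWORD) units, hence the "+ 7" round-up and ">> 3" in RX_JUMBO_THRESH above. */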
37 #define IMT_VAL 100 /* interrupt moderator timer, us */
38 #define ICT_VAL 50000 /* interrupt clear timer, us */
39 #define SMB_TIMER 200000
40 #define RRD_THRESH 1 /* packets to queue before interrupt */
41 #define TPD_BURST 5
42 #define TPD_THRESH (TX_DESC_COUNT / 2)
43 #define RX_COUNT_DOWN 4
44 #define TX_COUNT_DOWN (IMT_VAL * 4 / 3)
45 #define DMAR_DLY_CNT 15
46 #define DMAW_DLY_CNT 4
47
48 #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
49
50 /*
51 * atl1e_pci_tbl - PCI Device ID Table
52 *
53 * Entries take the form
54 * PCI_ROM ( Vendor ID, Device ID, name, description, driver data )
55 *
56 * The table length is given by id_count in the pci_driver below,
57 * so no terminating all-zeros entry is needed.
58 */
59 static struct pci_device_id atl1e_pci_tbl[] = {
60 PCI_ROM(0x1969, 0x1026, "atl1e_26", "Attansic L1E 0x1026", 0),
61 PCI_ROM(0x1969, 0x1066, "atl1e_66", "Attansic L1E 0x1066", 0),
62 };
63
64 static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
65
66 static const u16
67 atl1e_rx_page_vld_regs[AT_PAGE_NUM_PER_QUEUE] =
68 {
69 REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD
70 };
71
72 static const u16
73 atl1e_rx_page_lo_addr_regs[AT_PAGE_NUM_PER_QUEUE] =
74 {
75 REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO
76 };
77
78 static const u16
79 atl1e_rx_page_write_offset_regs[AT_PAGE_NUM_PER_QUEUE] =
80 {
81 REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO
82 };
83
84 static const u16 atl1e_pay_load_size[] = {
85 128, 256, 512, 1024, 2048, 4096,
86 };
87
88 /*
89 * atl1e_irq_enable - Enable default interrupt generation settings
90 * @adapter: board private structure
91 */
92 static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
93 {
94 AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
95 AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
96 AT_WRITE_FLUSH(&adapter->hw);
97 }
98
99 /*
100 * atl1e_irq_disable - Mask off interrupt generation on the NIC
101 * @adapter: board private structure
102 */
103 static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
104 {
105 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
106 AT_WRITE_FLUSH(&adapter->hw);
107 }
108
109 /*
110 * atl1e_irq_reset - reset interrupt configuration on the NIC
111 * @adapter: board private structure
112 */
113 static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
114 {
115 AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
116 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
117 AT_WRITE_FLUSH(&adapter->hw);
118 }
119
120 static void atl1e_reset(struct atl1e_adapter *adapter)
121 {
122 atl1e_down(adapter);
123 atl1e_up(adapter);
124 }
125
126 static int atl1e_check_link(struct atl1e_adapter *adapter)
127 {
128 struct atl1e_hw *hw = &adapter->hw;
129 struct net_device *netdev = adapter->netdev;
130 int err = 0;
131 u16 speed, duplex, phy_data;
132
133 /* MII_BMSR must be read twice */
134 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
135 atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
136
137 if ((phy_data & BMSR_LSTATUS) == 0) {
138 /* link down */
139 if (netdev_link_ok(netdev)) { /* old link state: Up */
140 u32 value;
141 /* disable rx */
142 value = AT_READ_REG(hw, REG_MAC_CTRL);
143 value &= ~MAC_CTRL_RX_EN;
144 AT_WRITE_REG(hw, REG_MAC_CTRL, value);
145 adapter->link_speed = SPEED_0;
146
147 DBG("atl1e: %s link is down\n", netdev->name);
148 netdev_link_down(netdev);
149 }
150 } else {
151 /* Link Up */
152 err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
153 if (err)
154 return err;
155
156 /* record the newly negotiated link parameters */
157 if (adapter->link_speed != speed ||
158 adapter->link_duplex != duplex) {
159 adapter->link_speed = speed;
160 adapter->link_duplex = duplex;
161 atl1e_setup_mac_ctrl(adapter);
162
163 DBG("atl1e: %s link is up, %d Mbps, %s duplex\n",
164 netdev->name, adapter->link_speed,
165 adapter->link_duplex == FULL_DUPLEX ?
166 "full" : "half");
167 netdev_link_up(netdev);
168 }
169 }
170 return 0;
171 }
172
173 static int atl1e_mdio_read(struct net_device *netdev, int phy_id __unused,
174 int reg_num)
175 {
176 struct atl1e_adapter *adapter = netdev_priv(netdev);
177 u16 result;
178
179 atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
180 return result;
181 }
182
183 static void atl1e_mdio_write(struct net_device *netdev, int phy_id __unused,
184 int reg_num, int val)
185 {
186 struct atl1e_adapter *adapter = netdev_priv(netdev);
187
188 atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
189 }
190
191 static void atl1e_setup_pcicmd(struct pci_device *pdev)
192 {
193 u16 cmd;
194
195 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
196 cmd |= (PCI_COMMAND_MEM | PCI_COMMAND_MASTER);
197 pci_write_config_word(pdev, PCI_COMMAND, cmd);
198
199 /*
200 * Some motherboard BIOS (PXE/EFI) drivers may leave PME set
201 * when they transfer control to the OS (Windows/Linux), so we
202 * must clear this bit before the NIC can work normally
203 */
204 pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
205 mdelay(1);
206 }
207
208 /*
209 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
210 * @adapter: board private structure to initialize
211 *
212 * atl1e_sw_init initializes the Adapter private data structure.
213 * Fields are initialized based on PCI device information and
214 * OS network device settings (MTU size).
215 */
216 static int atl1e_sw_init(struct atl1e_adapter *adapter)
217 {
218 struct atl1e_hw *hw = &adapter->hw;
219 struct pci_device *pdev = adapter->pdev;
220 u32 phy_status_data = 0;
221 u8 rev_id = 0;
222
223 adapter->link_speed = SPEED_0; /* hardware init */
224 adapter->link_duplex = FULL_DUPLEX;
225
226 /* PCI config space info */
227 pci_read_config_byte(pdev, PCI_REVISION, &rev_id);
228
229 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
230 /* nic type */
231 if (rev_id >= 0xF0) {
232 hw->nic_type = athr_l2e_revB;
233 } else {
234 if (phy_status_data & PHY_STATUS_100M)
235 hw->nic_type = athr_l1e;
236 else
237 hw->nic_type = athr_l2e_revA;
238 }
239
240 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
241
242 hw->emi_ca = !!(phy_status_data & PHY_STATUS_EMI_CA);
243
244 hw->phy_configured = 0;
245
246 /* need confirm */
247
248 hw->dmar_block = atl1e_dma_req_1024;
249 hw->dmaw_block = atl1e_dma_req_1024;
250
251 return 0;
252 }
253
254 /*
255 * atl1e_clean_tx_ring - free all Tx buffers for device close
256 * @adapter: board private structure
257 */
258 static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
259 {
260 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
261 &adapter->tx_ring;
262 struct atl1e_tx_buffer *tx_buffer = NULL;
263 u16 index, ring_count = tx_ring->count;
264
265 if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
266 return;
267
268 for (index = 0; index < ring_count; index++) {
269 tx_buffer = &tx_ring->tx_buffer[index];
270 if (tx_buffer->iob) {
271 netdev_tx_complete(adapter->netdev, tx_buffer->iob);
272 tx_buffer->dma = 0;
273 tx_buffer->iob = NULL;
274 }
275 }
276
277 /* Zero out Tx-buffers */
278 memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
279 ring_count);
280 memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
281 ring_count);
282 }
283
284 /*
285 * atl1e_clean_rx_ring - Free rx-reservation iobs
286 * @adapter: board private structure
287 */
288 static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
289 {
290 struct atl1e_rx_ring *rx_ring =
291 (struct atl1e_rx_ring *)&adapter->rx_ring;
292 struct atl1e_rx_page_desc *rx_page_desc = &rx_ring->rx_page_desc;
293 u16 j;
294
295 if (adapter->ring_vir_addr == NULL)
296 return;
297
298 /* Zero out the descriptor ring */
299 for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
300 if (rx_page_desc->rx_page[j].addr != NULL) {
301 memset(rx_page_desc->rx_page[j].addr, 0,
302 rx_ring->real_page_size);
303 }
304 }
305 }
306
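/*
* All DMA-visible structures live in a single contiguous allocation,
* laid out as: the TPD (transmit descriptor) ring, 8-byte aligned;
* the RXF receive pages, 32-byte aligned; then the TX CMB dword plus
* one write-offset dword per RX page. The size computed below reserves
* worst-case padding for each alignment step.
*/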
307 static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
308 {
309 *ring_size = ((u32)(adapter->tx_ring.count *
310 sizeof(struct atl1e_tpd_desc) + 7
311 /* tx ring, qword align */
312 + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE
313 + 31
314 /* rx ring, 32 bytes align */
315 + (1 + AT_PAGE_NUM_PER_QUEUE) *
316 sizeof(u32) + 3));
317 /* tx, rx cmd, dword align */
318 }
319
320 static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
321 {
322 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
323
324 rx_ring->real_page_size = adapter->rx_ring.page_size
325 + MAX_FRAME_SIZE
326 + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
327 rx_ring->real_page_size = (rx_ring->real_page_size + 31) & ~31;
328 atl1e_cal_ring_size(adapter, &adapter->ring_size);
329
330 adapter->ring_vir_addr = NULL;
331 adapter->rx_ring.desc = NULL;
332
333 return;
334 }
335
336 /*
337 * Read / Write Ptr Initialize:
338 */
339 static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
340 {
341 struct atl1e_tx_ring *tx_ring = NULL;
342 struct atl1e_rx_ring *rx_ring = NULL;
343 struct atl1e_rx_page_desc *rx_page_desc = NULL;
344 int j;
345
346 tx_ring = &adapter->tx_ring;
347 rx_ring = &adapter->rx_ring;
348 rx_page_desc = &rx_ring->rx_page_desc;
349
350 tx_ring->next_to_use = 0;
351 tx_ring->next_to_clean = 0;
352
353 rx_page_desc->rx_using = 0;
354 rx_page_desc->rx_nxseq = 0;
355 for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
356 *rx_page_desc->rx_page[j].write_offset_addr = 0;
357 rx_page_desc->rx_page[j].read_offset = 0;
358 }
359 }
360
361 /*
362 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
363 * @adapter: board private structure
364 *
365 * Free all transmit software resources
366 */
367 static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
368 {
369 atl1e_clean_tx_ring(adapter);
370 atl1e_clean_rx_ring(adapter);
371
372 if (adapter->ring_vir_addr) {
373 free_dma(adapter->ring_vir_addr, adapter->ring_size);
374 adapter->ring_vir_addr = NULL;
375 adapter->ring_dma = 0;
376 }
377
378 if (adapter->tx_ring.tx_buffer) {
379 free(adapter->tx_ring.tx_buffer);
380 adapter->tx_ring.tx_buffer = NULL;
381 }
382 }
383
384 /*
385 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
386 * @adapter: board private structure
387 *
388 * Return 0 on success, negative on failure
389 */
390 static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
391 {
392 struct atl1e_tx_ring *tx_ring;
393 struct atl1e_rx_ring *rx_ring;
394 struct atl1e_rx_page_desc *rx_page_desc;
395 int size, j;
396 u32 offset = 0;
397 int err = 0;
398
399 if (adapter->ring_vir_addr != NULL)
400 return 0; /* already allocated */
401
402 tx_ring = &adapter->tx_ring;
403 rx_ring = &adapter->rx_ring;
404
405 /* real ring DMA buffer */
406
407 size = adapter->ring_size;
408 adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
409
410 if (adapter->ring_vir_addr == NULL) {
411 DBG("atl1e: out of memory allocating %d bytes for %s ring\n",
412 adapter->ring_size, adapter->netdev->name);
413 return -ENOMEM;
414 }
415
416 adapter->ring_dma = virt_to_bus(adapter->ring_vir_addr);
417 memset(adapter->ring_vir_addr, 0, adapter->ring_size);
418
419 rx_page_desc = &rx_ring->rx_page_desc;
420
421 /* Init TPD Ring */
422 tx_ring->dma = (adapter->ring_dma + 7) & ~7;
423 offset = tx_ring->dma - adapter->ring_dma;
424 tx_ring->desc = (struct atl1e_tpd_desc *)
425 (adapter->ring_vir_addr + offset);
426 size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
427 tx_ring->tx_buffer = zalloc(size);
428 if (tx_ring->tx_buffer == NULL) {
429 DBG("atl1e: out of memory allocating %d bytes for %s txbuf\n",
430 size, adapter->netdev->name);
431 err = -ENOMEM;
432 goto failed;
433 }
434
435 /* Init RXF-Pages */
436 offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
437 offset = (offset + 31) & ~31;
438
439 for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
440 rx_page_desc->rx_page[j].dma =
441 adapter->ring_dma + offset;
442 rx_page_desc->rx_page[j].addr =
443 adapter->ring_vir_addr + offset;
444 offset += rx_ring->real_page_size;
445 }
446
447 /* Init CMB dma address */
448 tx_ring->cmb_dma = adapter->ring_dma + offset;
449 tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
450 offset += sizeof(u32);
451
452 for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
453 rx_page_desc->rx_page[j].write_offset_dma =
454 adapter->ring_dma + offset;
455 rx_page_desc->rx_page[j].write_offset_addr =
456 adapter->ring_vir_addr + offset;
457 offset += sizeof(u32);
458 }
459
460 if (offset > adapter->ring_size) {
461 DBG("atl1e: ring miscalculation! need %d > %d bytes\n",
462 offset, adapter->ring_size);
463 err = -EINVAL;
464 goto failed;
465 }
466
467 return 0;
468 failed:
469 atl1e_free_ring_resources(adapter);
470 return err;
471 }
472
473 static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
474 {
475
476 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
477 struct atl1e_rx_ring *rx_ring =
478 (struct atl1e_rx_ring *)&adapter->rx_ring;
479 struct atl1e_tx_ring *tx_ring =
480 (struct atl1e_tx_ring *)&adapter->tx_ring;
481 struct atl1e_rx_page_desc *rx_page_desc = NULL;
482 int j;
483
484 AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, 0);
485 AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, tx_ring->dma);
486 AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
487 AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, tx_ring->cmb_dma);
488
489 rx_page_desc = &rx_ring->rx_page_desc;
490
491 /* RXF Page Physical address / Page Length */
492 AT_WRITE_REG(hw, REG_RXF0_BASE_ADDR_HI, 0);
493
494 for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
495 u32 page_phy_addr;
496 u32 offset_phy_addr;
497
498 page_phy_addr = rx_page_desc->rx_page[j].dma;
499 offset_phy_addr = rx_page_desc->rx_page[j].write_offset_dma;
500
501 AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[j], page_phy_addr);
502 AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[j],
503 offset_phy_addr);
504 AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[j], 1);
505 }
506
507 /* Page Length */
508 AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
509 /* Load all of base address above */
510 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
511
512 return;
513 }
514
515 static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
516 {
517 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
518 u32 dev_ctrl_data = 0;
519 u32 max_pay_load = 0;
520 u32 jumbo_thresh = 0;
521 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
522
523 /* configure TXQ param */
524 if (hw->nic_type != athr_l2e_revB) {
525 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
526 jumbo_thresh = MAX_FRAME_SIZE + extra_size;
527 AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
528 }
529
530 dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
531
532 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
533 DEVICE_CTRL_MAX_PAYLOAD_MASK;
534 if (max_pay_load < hw->dmaw_block)
535 hw->dmaw_block = max_pay_load;
536
537 max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
538 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
539 if (max_pay_load < hw->dmar_block)
540 hw->dmar_block = max_pay_load;
541
542 if (hw->nic_type != athr_l2e_revB)
543 AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
544 atl1e_pay_load_size[hw->dmar_block]);
545 /* enable TXQ */
546 AT_WRITE_REGW(hw, REG_TXQ_CTRL,
547 ((TPD_BURST & TXQ_CTRL_NUM_TPD_BURST_MASK)
548 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
549 | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
550 return;
551 }
552
553 static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
554 {
555 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
556 u32 rxf_len = 0;
557 u32 rxf_low = 0;
558 u32 rxf_high = 0;
559 u32 rxf_thresh_data = 0;
560 u32 rxq_ctrl_data = 0;
561
562 if (hw->nic_type != athr_l2e_revB) {
563 AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
564 (u16)((RX_JUMBO_THRESH & RXQ_JMBOSZ_TH_MASK) <<
565 RXQ_JMBOSZ_TH_SHIFT |
566 (1 & RXQ_JMBO_LKAH_MASK) <<
567 RXQ_JMBO_LKAH_SHIFT));
568
569 rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
570 rxf_high = rxf_len * 4 / 5;
571 rxf_low = rxf_len / 5;
572 rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
573 << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
574 ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
575 << RXQ_RXF_PAUSE_TH_LO_SHIFT);
576
577 AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
578 }
579
580 /* RRS */
581 AT_WRITE_REG(hw, REG_IDT_TABLE, 0);
582 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, 0);
583
584 rxq_ctrl_data |= RXQ_CTRL_PBA_ALIGN_32 |
585 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
586
587 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
588 return;
589 }
590
591 static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
592 {
593 struct atl1e_hw *hw = &adapter->hw;
594 u32 dma_ctrl_data = 0;
595
596 dma_ctrl_data = DMA_CTRL_RXCMB_EN;
597 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
598 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
599 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
600 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
601 dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
602 dma_ctrl_data |= (DMAR_DLY_CNT & DMA_CTRL_DMAR_DLY_CNT_MASK)
603 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
604 dma_ctrl_data |= (DMAW_DLY_CNT & DMA_CTRL_DMAW_DLY_CNT_MASK)
605 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
606
607 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
608 return;
609 }
610
611 static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
612 {
613 u32 value;
614 struct atl1e_hw *hw = &adapter->hw;
615
616 /* Config MAC CTRL Register */
617 value = MAC_CTRL_TX_EN |
618 MAC_CTRL_RX_EN ;
619
620 if (FULL_DUPLEX == adapter->link_duplex)
621 value |= MAC_CTRL_DUPLX;
622
623 value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
624 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
625 MAC_CTRL_SPEED_SHIFT);
626 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
627
628 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
629 value |= ((PREAMBLE_LEN & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
630
631 value |= MAC_CTRL_BC_EN;
632 value |= MAC_CTRL_MC_ALL_EN;
633
634 AT_WRITE_REG(hw, REG_MAC_CTRL, value);
635 }
636
637 /*
638 * atl1e_configure - Configure Transmit&Receive Unit after Reset
639 * @adapter: board private structure
640 *
641 * Configure the Tx /Rx unit of the MAC after a reset.
642 */
643 static int atl1e_configure(struct atl1e_adapter *adapter)
644 {
645 struct atl1e_hw *hw = &adapter->hw;
646 u32 intr_status_data = 0;
647
648 /* clear interrupt status */
649 AT_WRITE_REG(hw, REG_ISR, ~0);
650
651 /* 1. set MAC Address */
652 atl1e_hw_set_mac_addr(hw);
653
654 /* 2. Init the Multicast HASH table (clear) */
655 AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
656 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
657
658 /* 3. Clear any WOL status */
659 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
660
661 /* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
662 * TPD Ring/SMB/RXF0 Page CMBs: they all share the same
663 * high 32 bits of the memory address */
664 atl1e_configure_des_ring(adapter);
665
666 /* 5. set Interrupt Moderator Timer */
667 AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, IMT_VAL);
668 AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, IMT_VAL);
669 AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
670 MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
671
672 /* 6. rx/tx threshold to trigger an interrupt */
673 AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, RRD_THRESH);
674 AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, TPD_THRESH);
675 AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, RX_COUNT_DOWN);
676 AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, TX_COUNT_DOWN);
677
678 /* 7. set Interrupt Clear Timer */
679 AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, ICT_VAL);
680
681 /* 8. set MTU */
682 AT_WRITE_REG(hw, REG_MTU, MAX_FRAME_SIZE + ETH_HLEN +
683 VLAN_HLEN + ETH_FCS_LEN);
684
685 /* 9. config TXQ early tx threshold */
686 atl1e_configure_tx(adapter);
687
688 /* 10. config RXQ */
689 atl1e_configure_rx(adapter);
690
691 /* 11. config DMA Engine */
692 atl1e_configure_dma(adapter);
693
694 /* 12. SMB timer to trigger an interrupt */
695 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, SMB_TIMER);
696
697 intr_status_data = AT_READ_REG(hw, REG_ISR);
698 if ((intr_status_data & ISR_PHY_LINKDOWN) != 0) {
699 DBG("atl1e: configure failed, PCIE phy link down\n");
700 return -1;
701 }
702
703 AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
704 return 0;
705 }
706
707 static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
708 {
709 u16 phy_data;
710
711 atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
712 }
713
714 static int atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
715 {
716 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
717 &adapter->tx_ring;
718 struct atl1e_tx_buffer *tx_buffer = NULL;
719 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
720 u16 next_to_clean = tx_ring->next_to_clean;
721
722 while (next_to_clean != hw_next_to_clean) {
723 tx_buffer = &tx_ring->tx_buffer[next_to_clean];
724
725 tx_buffer->dma = 0;
726 if (tx_buffer->iob) {
727 netdev_tx_complete(adapter->netdev, tx_buffer->iob);
728 tx_buffer->iob = NULL;
729 }
730
731 if (++next_to_clean == tx_ring->count)
732 next_to_clean = 0;
733 }
734
735 tx_ring->next_to_clean = next_to_clean;
736
737 return 1;
738 }
739
740 static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter)
741 {
742 struct atl1e_rx_page_desc *rx_page_desc =
743 (struct atl1e_rx_page_desc *) &adapter->rx_ring.rx_page_desc;
744 u8 rx_using = rx_page_desc->rx_using;
745
746 return (struct atl1e_rx_page *)&(rx_page_desc->rx_page[rx_using]);
747 }
748
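/*
* RX uses two host memory pages per queue: the hardware DMAs each frame,
* preceded by a struct atl1e_recv_ret_status, into the currently valid
* page and advances that page's write-offset dword in host memory. We
* consume frames from read_offset up to that write offset; once a page
* is exhausted we hand it back to the hardware via its page-valid
* register and switch to the other page.
*/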
749 static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter)
750 {
751 struct net_device *netdev = adapter->netdev;
752 struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
753 &adapter->rx_ring;
754 struct atl1e_rx_page_desc *rx_page_desc =
755 (struct atl1e_rx_page_desc *) &rx_ring->rx_page_desc;
756 struct io_buffer *iob = NULL;
757 struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter);
758 u32 packet_size, write_offset;
759 struct atl1e_recv_ret_status *prrs;
760
761 write_offset = *(rx_page->write_offset_addr);
762 if (rx_page->read_offset >= write_offset)
763 return;
764
765 do {
766 /* get new packet's rrs */
767 prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
768 rx_page->read_offset);
769 /* check sequence number */
770 if (prrs->seq_num != rx_page_desc->rx_nxseq) {
771 DBG("atl1e %s: RX sequence number error (%d != %d)\n",
772 netdev->name, prrs->seq_num,
773 rx_page_desc->rx_nxseq);
774 rx_page_desc->rx_nxseq++;
775 goto fatal_err;
776 }
777
778 rx_page_desc->rx_nxseq++;
779
780 /* error packet */
781 if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
782 if (prrs->err_flag & (RRS_ERR_BAD_CRC |
783 RRS_ERR_DRIBBLE | RRS_ERR_CODE |
784 RRS_ERR_TRUNC)) {
785 /* hardware error, discard this
786 packet */
787 netdev_rx_err(netdev, NULL, EIO);
788 goto skip_pkt;
789 }
790 }
791
792 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
793 RRS_PKT_SIZE_MASK) - ETH_FCS_LEN;
794 iob = alloc_iob(packet_size + NET_IP_ALIGN);
795 if (iob == NULL) {
796 DBG("atl1e %s: dropping packet under memory pressure\n",
797 netdev->name);
798 goto skip_pkt;
799 }
800 iob_reserve(iob, NET_IP_ALIGN);
801 memcpy(iob->data, (u8 *)(prrs + 1), packet_size);
802 iob_put(iob, packet_size);
803
804 netdev_rx(netdev, iob);
805
806 skip_pkt:
807 /* skip current packet whether it's ok or not. */
808 rx_page->read_offset +=
809 (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
810 RRS_PKT_SIZE_MASK) +
811 sizeof(struct atl1e_recv_ret_status) + 31) &
812 0xFFFFFFE0);
813
814 if (rx_page->read_offset >= rx_ring->page_size) {
815 /* mark this page clean */
816 u16 reg_addr;
817 u8 rx_using;
818
819 rx_page->read_offset =
820 *(rx_page->write_offset_addr) = 0;
821 rx_using = rx_page_desc->rx_using;
822 reg_addr =
823 atl1e_rx_page_vld_regs[rx_using];
824 AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
825 rx_page_desc->rx_using ^= 1;
826 rx_page = atl1e_get_rx_page(adapter);
827 }
828 write_offset = *(rx_page->write_offset_addr);
829 } while (rx_page->read_offset < write_offset);
830
831 return;
832
833 fatal_err:
834 if (!netdev_link_ok(adapter->netdev))
835 atl1e_reset(adapter);
836 }
837
838 /*
839 * atl1e_poll - poll for completed transmissions and received packets
840 * @netdev: network device
841 */
842 static void atl1e_poll(struct net_device *netdev)
843 {
844 struct atl1e_adapter *adapter = netdev_priv(netdev);
845 struct atl1e_hw *hw = &adapter->hw;
846 int max_ints = 64;
847 u32 status;
848
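/* Service a bounded number of interrupt causes per poll */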
849 do {
850 status = AT_READ_REG(hw, REG_ISR);
851 if ((status & IMR_NORMAL_MASK) == 0)
852 break;
853
854 /* link event */
855 if (status & ISR_GPHY)
856 atl1e_clear_phy_int(adapter);
857 /* Ack ISR */
858 AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
859
860 /* check if PCIE PHY Link down */
861 if (status & ISR_PHY_LINKDOWN) {
862 DBG("atl1e: PCI-E PHY link down: %x\n", status);
863 if (netdev_link_ok(adapter->netdev)) {
864 /* reset MAC */
865 atl1e_irq_reset(adapter);
866 atl1e_reset(adapter);
867 break;
868 }
869 }
870
871 /* check if DMA read/write error */
872 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
873 DBG("atl1e: PCI-E DMA RW error: %x\n", status);
874 atl1e_irq_reset(adapter);
875 atl1e_reset(adapter);
876 break;
877 }
878
879 /* link event */
880 if (status & (ISR_GPHY | ISR_MANUAL)) {
881 atl1e_check_link(adapter);
882 break;
883 }
884
885 /* transmit event */
886 if (status & ISR_TX_EVENT)
887 atl1e_clean_tx_irq(adapter);
888
889 if (status & ISR_RX_EVENT)
890 atl1e_clean_rx_irq(adapter);
891 } while (--max_ints > 0);
892
893 /* re-enable Interrupt*/
894 AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
895
896 return;
897 }
898
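/*
* Number of free transmit descriptors. One slot is always kept unused
* (the usual ring-buffer convention) so that a full ring can be
* distinguished from an empty one.
*/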
899 static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
900 {
901 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
902 u16 next_to_use = 0;
903 u16 next_to_clean = 0;
904
905 next_to_clean = tx_ring->next_to_clean;
906 next_to_use = tx_ring->next_to_use;
907
908 return (u16)(next_to_clean > next_to_use) ?
909 (next_to_clean - next_to_use - 1) :
910 (tx_ring->count + next_to_clean - next_to_use - 1);
911 }
912
913 /*
914 * get next usable tpd
915 * Note: the caller should use atl1e_tpd_avail() first to make sure
916 * enough TPDs are available
917 */
918 static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
919 {
920 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
921 u16 next_to_use = 0;
922
923 next_to_use = tx_ring->next_to_use;
924 if (++tx_ring->next_to_use == tx_ring->count)
925 tx_ring->next_to_use = 0;
926
927 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
928 return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
929 }
930
931 static struct atl1e_tx_buffer *
932 atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
933 {
934 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
935
936 return &tx_ring->tx_buffer[tpd - tx_ring->desc];
937 }
938
939 static void atl1e_tx_map(struct atl1e_adapter *adapter,
940 struct io_buffer *iob, struct atl1e_tpd_desc *tpd)
941 {
942 struct atl1e_tx_buffer *tx_buffer = NULL;
943 u16 buf_len = iob_len(iob);
944
945 tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
946 tx_buffer->iob = iob;
947 tx_buffer->length = buf_len;
948 tx_buffer->dma = virt_to_bus(iob->data);
949 tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
950 tpd->word2 = ((tpd->word2 & ~TPD_BUFLEN_MASK) |
951 ((cpu_to_le32(buf_len) & TPD_BUFLEN_MASK) <<
952 TPD_BUFLEN_SHIFT));
953 tpd->word3 |= 1 << TPD_EOP_SHIFT;
954 }
955
956 static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count __unused,
957 struct atl1e_tpd_desc *tpd __unused)
958 {
959 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
960 wmb();
961 AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
962 }
963
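/*
* Transmit a frame. Each frame occupies exactly one TPD; the packet is
* DMAed directly from the io_buffer, so the buffer is only completed
* back to the network stack once the hardware's consumer index has
* passed it (see atl1e_clean_tx_irq).
*/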
964 static int atl1e_xmit_frame(struct net_device *netdev, struct io_buffer *iob)
965 {
966 struct atl1e_adapter *adapter = netdev_priv(netdev);
967 u16 tpd_req = 1;
968 struct atl1e_tpd_desc *tpd;
969
970 if (!netdev_link_ok(netdev)) {
971 return -EINVAL;
972 }
973
974 if (atl1e_tpd_avail(adapter) < tpd_req) {
975 return -EBUSY;
976 }
977
978 tpd = atl1e_get_tpd(adapter);
979
980 atl1e_tx_map(adapter, iob, tpd);
981 atl1e_tx_queue(adapter, tpd_req, tpd);
982
983 return 0;
984 }
985
986 int atl1e_up(struct atl1e_adapter *adapter)
987 {
988 struct net_device *netdev = adapter->netdev;
989 int err = 0;
990 u32 val;
991
992 /* hardware has been reset, we need to reload some things */
993 err = atl1e_init_hw(&adapter->hw);
994 if (err) {
995 return -EIO;
996 }
997 atl1e_init_ring_ptrs(adapter);
998
999 memcpy(adapter->hw.mac_addr, netdev->ll_addr, ETH_ALEN);
1000
1001 if (atl1e_configure(adapter) != 0) {
1002 return -EIO;
1003 }
1004
1005 atl1e_irq_disable(adapter);
1006
1007 val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
1008 AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
1009 val | MASTER_CTRL_MANUAL_INT);
1010
1011 return err;
1012 }
1013
1014 void atl1e_irq(struct net_device *netdev, int enable)
1015 {
1016 struct atl1e_adapter *adapter = netdev_priv(netdev);
1017
1018 if (enable)
1019 atl1e_irq_enable(adapter);
1020 else
1021 atl1e_irq_disable(adapter);
1022 }
1023
1024 void atl1e_down(struct atl1e_adapter *adapter)
1025 {
1026 struct net_device *netdev = adapter->netdev;
1027
1028 /* reset MAC to disable all RX/TX */
1029 atl1e_reset_hw(&adapter->hw);
1030 mdelay(1);
1031
1032 netdev_link_down(netdev);
1033 adapter->link_speed = SPEED_0;
1034 adapter->link_duplex = -1;
1035
1036 atl1e_clean_tx_ring(adapter);
1037 atl1e_clean_rx_ring(adapter);
1038 }
1039
1040 /*
1041 * atl1e_open - Called when a network interface is made active
1042 * @netdev: network interface device structure
1043 *
1044 * Returns 0 on success, negative value on failure
1045 *
1046 * The open entry point is called when a network interface is made
1047 * active by the system (IFF_UP). At this point all resources needed
1048 * for transmit and receive operations are allocated, the interrupt
1049 * handler is registered with the OS, the watchdog timer is started,
1050 * and the stack is notified that the interface is ready.
1051 */
1052 static int atl1e_open(struct net_device *netdev)
1053 {
1054 struct atl1e_adapter *adapter = netdev_priv(netdev);
1055 int err;
1056
1057 /* allocate rx/tx dma buffer & descriptors */
1058 atl1e_init_ring_resources(adapter);
1059 err = atl1e_setup_ring_resources(adapter);
1060 if (err)
1061 return err;
1062
1063 err = atl1e_up(adapter);
1064 if (err)
1065 goto err_up;
1066
1067 return 0;
1068
1069 err_up:
1070 atl1e_free_ring_resources(adapter);
1071 atl1e_reset_hw(&adapter->hw);
1072
1073 return err;
1074 }
1075
1076 /*
1077 * atl1e_close - Disables a network interface
1078 * @netdev: network interface device structure
1079 *
1080 * Returns 0, this is not allowed to fail
1081 *
1082 * The close entry point is called when an interface is de-activated
1083 * by the OS. The hardware is still under the drivers control, but
1084 * needs to be disabled. A global MAC reset is issued to stop the
1085 * hardware, and all transmit and receive resources are freed.
1086 */
1087 static void atl1e_close(struct net_device *netdev)
1088 {
1089 struct atl1e_adapter *adapter = netdev_priv(netdev);
1090
1091 atl1e_down(adapter);
1092 atl1e_free_ring_resources(adapter);
1093 }
1094
1095 static struct net_device_operations atl1e_netdev_ops = {
1096 .open = atl1e_open,
1097 .close = atl1e_close,
1098 .transmit = atl1e_xmit_frame,
1099 .poll = atl1e_poll,
1100 .irq = atl1e_irq,
1101 };
1102
1103 static void atl1e_init_netdev(struct net_device *netdev, struct pci_device *pdev)
1104 {
1105 netdev_init(netdev, &atl1e_netdev_ops);
1106
1107 netdev->dev = &pdev->dev;
1108 pci_set_drvdata(pdev, netdev);
1109 }
1110
1111 /*
1112 * atl1e_probe - Device Initialization Routine
1113 * @pdev: PCI device information struct
1114 * @ent: entry in atl1e_pci_tbl
1115 *
1116 * Returns 0 on success, negative on failure
1117 *
1118 * atl1e_probe initializes an adapter identified by a pci_device structure.
1119 * The OS initialization, configuring of the adapter private structure,
1120 * and a hardware reset occur.
1121 */
1122 static int atl1e_probe(struct pci_device *pdev)
1123 {
1124 struct net_device *netdev;
1125 struct atl1e_adapter *adapter = NULL;
1126 static int cards_found;
1127
1128 int err = 0;
1129
1130 adjust_pci_device(pdev);
1131
1132 netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
1133 if (netdev == NULL) {
1134 err = -ENOMEM;
1135 DBG("atl1e: out of memory allocating net_device\n");
1136 goto err;
1137 }
1138
1139 atl1e_init_netdev(netdev, pdev);
1140
1141 adapter = netdev_priv(netdev);
1142 adapter->bd_number = cards_found;
1143 adapter->netdev = netdev;
1144 adapter->pdev = pdev;
1145 adapter->hw.adapter = adapter;
1146 if (!pdev->membase) {
1147 err = -EIO;
1148 DBG("atl1e: cannot map device registers\n");
1149 goto err_free_netdev;
1150 }
1151 adapter->hw.hw_addr = bus_to_virt(pdev->membase);
1152
1153 /* init mii data */
1154 adapter->mii.dev = netdev;
1155 adapter->mii.mdio_read = atl1e_mdio_read;
1156 adapter->mii.mdio_write = atl1e_mdio_write;
1157 adapter->mii.phy_id_mask = 0x1f;
1158 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
1159
1160 /* get user settings */
1161 adapter->tx_ring.count = TX_DESC_COUNT;
1162 adapter->rx_ring.page_size = RX_MEM_SIZE;
1163
1164 atl1e_setup_pcicmd(pdev);
1165
1166 /* setup the private structure */
1167 err = atl1e_sw_init(adapter);
1168 if (err) {
1169 DBG("atl1e: private data init failed\n");
1170 goto err_free_netdev;
1171 }
1172
1173 /* Init GPHY as early as possible due to power saving issue */
1174 atl1e_phy_init(&adapter->hw);
1175
1176 /* reset the controller to
1177 * put the device in a known good starting state */
1178 err = atl1e_reset_hw(&adapter->hw);
1179 if (err) {
1180 err = -EIO;
1181 goto err_free_netdev;
1182 }
1183
1184 /* This may have been run by a zero-wait timer around
1185 now... unclear. */
1186 atl1e_restart_autoneg(&adapter->hw);
1187
1188 if (atl1e_read_mac_addr(&adapter->hw) != 0) {
1189 DBG("atl1e: cannot read MAC address from EEPROM\n");
1190 err = -EIO;
1191 goto err_free_netdev;
1192 }
1193
1194 memcpy(netdev->hw_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
1195 memcpy(netdev->ll_addr, adapter->hw.mac_addr, ETH_ALEN);
1196 DBG("atl1e: Attansic L1E Ethernet controller on %s, "
1197 "%02x:%02x:%02x:%02x:%02x:%02x\n", adapter->netdev->name,
1198 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
1199 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
1200 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
1201
1202 err = register_netdev(netdev);
1203 if (err) {
1204 DBG("atl1e: cannot register network device\n");
1205 goto err_free_netdev;
1206 }
1207
1208 cards_found++;
1209 return 0;
1210
1211 err_free_netdev:
1212 netdev_nullify(netdev);
1213 netdev_put(netdev);
1214 err:
1215 return err;
1216 }
1217
1218 /*
1219 * atl1e_remove - Device Removal Routine
1220 * @pdev: PCI device information struct
1221 *
1222 * atl1e_remove is called by the PCI subsystem to alert the driver
1223 * that it should release a PCI device. This could be caused by a
1224 * Hot-Plug event, or because the driver is going to be removed from
1225 * memory.
1226 */
1227 static void atl1e_remove(struct pci_device *pdev)
1228 {
1229 struct net_device *netdev = pci_get_drvdata(pdev);
1230 struct atl1e_adapter *adapter = netdev_priv(netdev);
1231
1232 unregister_netdev(netdev);
1233 atl1e_free_ring_resources(adapter);
1234 atl1e_force_ps(&adapter->hw);
1235 netdev_nullify(netdev);
1236 netdev_put(netdev);
1237 }
1238
1239 struct pci_driver atl1e_driver __pci_driver = {
1240 .ids = atl1e_pci_tbl,
1241 .id_count = (sizeof(atl1e_pci_tbl) / sizeof(atl1e_pci_tbl[0])),
1242 .probe = atl1e_probe,
1243 .remove = atl1e_remove,
1244 };
1245
1246 /********** Hardware-level functions: **********/
1247
1248 /*
1249 * check_eeprom_exist
1250 * return 0 if the EEPROM exists
1251 */
1252 int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
1253 {
1254 u32 value;
1255
1256 value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
1257 if (value & SPI_FLASH_CTRL_EN_VPD) {
1258 value &= ~SPI_FLASH_CTRL_EN_VPD;
1259 AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
1260 }
1261 value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
1262 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
1263 }
1264
1265 void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
1266 {
1267 u32 value;
1268 /*
1269 * 00-0B-6A-F6-00-DC
1270 * 0: 6AF600DC 1: 000B
1271 * low dword
1272 */
1273 value = (((u32)hw->mac_addr[2]) << 24) |
1274 (((u32)hw->mac_addr[3]) << 16) |
1275 (((u32)hw->mac_addr[4]) << 8) |
1276 (((u32)hw->mac_addr[5])) ;
1277 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
1278 /* high dword */
1279 value = (((u32)hw->mac_addr[0]) << 8) |
1280 (((u32)hw->mac_addr[1])) ;
1281 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
1282 }
1283
1284 /*
1285 * atl1e_get_permanent_address
1286 * return 0 if a valid MAC address was obtained
1287 */
1288 static int atl1e_get_permanent_address(struct atl1e_hw *hw)
1289 {
1290 union {
1291 u32 dword[2];
1292 u8 byte[8];
1293 } hw_addr;
1294 u32 i;
1295 u32 twsi_ctrl_data;
1296 u8 eth_addr[ETH_ALEN];
1297
1298 if (!atl1e_check_eeprom_exist(hw)) {
1299 /* eeprom exist */
1300 twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
1301 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
1302 AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
1303 for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
1304 mdelay(10);
1305 twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
1306 if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
1307 break;
1308 }
1309 if (i >= AT_TWSI_EEPROM_TIMEOUT)
1310 return AT_ERR_TIMEOUT;
1311 }
1312
1313 /* otherwise the MAC address may have been set by the BIOS; the registers hold it in reverse byte order */
1314 hw_addr.dword[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
1315 hw_addr.dword[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
1316 for (i = 0; i < ETH_ALEN; i++) {
1317 eth_addr[ETH_ALEN - i - 1] = hw_addr.byte[i];
1318 }
1319
1320 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
1321 return 0;
1322 }
1323
1324 void atl1e_force_ps(struct atl1e_hw *hw)
1325 {
1326 AT_WRITE_REGW(hw, REG_GPHY_CTRL,
1327 GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
1328 }
1329
1330 /*
1331 * Reads the adapter's MAC address from the EEPROM
1332 *
1333 * hw - Struct containing variables accessed by shared code
1334 */
1335 int atl1e_read_mac_addr(struct atl1e_hw *hw)
1336 {
1337 int err = 0;
1338
1339 err = atl1e_get_permanent_address(hw);
1340 if (err)
1341 return AT_ERR_EEPROM;
1342 memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
1343 return 0;
1344 }
1345
1346 /*
1347 * Reads the value from a PHY register
1348 * hw - Struct containing variables accessed by shared code
1349 * reg_addr - address of the PHY register to read
1350 */
1351 int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
1352 {
1353 u32 val;
1354 int i;
1355
1356 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
1357 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
1358 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
1359
1360 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
1361
1362 wmb();
1363
1364 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
1365 udelay(2);
1366 val = AT_READ_REG(hw, REG_MDIO_CTRL);
1367 if (!(val & (MDIO_START | MDIO_BUSY)))
1368 break;
1369 wmb();
1370 }
1371 if (!(val & (MDIO_START | MDIO_BUSY))) {
1372 *phy_data = (u16)val;
1373 return 0;
1374 }
1375
1376 return AT_ERR_PHY;
1377 }
1378
1379 /*
1380 * Writes a value to a PHY register
1381 * hw - Struct containing variables accessed by shared code
1382 * reg_addr - address of the PHY register to write
1383 * data - data to write to the PHY
1384 */
1385 int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
1386 {
1387 int i;
1388 u32 val;
1389
1390 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
1391 (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
1392 MDIO_SUP_PREAMBLE |
1393 MDIO_START |
1394 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
1395
1396 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
1397 wmb();
1398
1399 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
1400 udelay(2);
1401 val = AT_READ_REG(hw, REG_MDIO_CTRL);
1402 if (!(val & (MDIO_START | MDIO_BUSY)))
1403 break;
1404 wmb();
1405 }
1406
1407 if (!(val & (MDIO_START | MDIO_BUSY)))
1408 return 0;
1409
1410 return AT_ERR_PHY;
1411 }
1412
1413 /*
1414 * atl1e_init_pcie - init PCIE module
1415 */
1416 static void atl1e_init_pcie(struct atl1e_hw *hw)
1417 {
1418 u32 value;
1419 /* comment out the two lines below to save more power when suspended
1420 value = LTSSM_TEST_MODE_DEF;
1421 AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
1422 */
1423
1424 /* pcie flow control mode change */
1425 value = AT_READ_REG(hw, 0x1008);
1426 value |= 0x8000;
1427 AT_WRITE_REG(hw, 0x1008, value);
1428 }
1429 /*
1430 * Configures PHY autoneg and flow control advertisement settings
1431 *
1432 * hw - Struct containing variables accessed by shared code
1433 */
1434 static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
1435 {
1436 s32 ret_val;
1437 u16 mii_autoneg_adv_reg;
1438 u16 mii_1000t_ctrl_reg;
1439
1440 if (0 != hw->mii_autoneg_adv_reg)
1441 return 0;
1442 /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
1443 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
1444 mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
1445
1446 /*
1447 * First we clear all the 10/100 mb speed bits in the Auto-Neg
1448 * Advertisement Register (Address 4) and the 1000 mb speed bits in
1449 * the 1000Base-T control Register (Address 9).
1450 */
1451 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
1452 mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
1453
1454 /* Assume auto-detect media type */
1455 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
1456 MII_AR_10T_FD_CAPS |
1457 MII_AR_100TX_HD_CAPS |
1458 MII_AR_100TX_FD_CAPS);
1459 if (hw->nic_type == athr_l1e) {
1460 mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
1461 }
1462
1463 /* flow control fixed to enable all */
1464 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
1465
1466 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
1467 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
1468
1469 ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
1470 if (ret_val)
1471 return ret_val;
1472
1473 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
1474 ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
1475 mii_1000t_ctrl_reg);
1476 if (ret_val)
1477 return ret_val;
1478 }
1479
1480 return 0;
1481 }
1482
1483
1484 /*
1485 * Resets the PHY and makes all configuration take effect
1486 *
1487 * hw - Struct containing variables accessed by shared code
1488 *
1489 * Sets bits 15 and 12 of the MII control register (for the F001 bug)
1490 */
1491 int atl1e_phy_commit(struct atl1e_hw *hw)
1492 {
1493 int ret_val;
1494 u16 phy_data;
1495
1496 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
1497
1498 ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
1499 if (ret_val) {
1500 u32 val;
1501 int i;
1502 /**************************************
1503 * pcie serdes link may be down !
1504 **************************************/
1505 for (i = 0; i < 25; i++) {
1506 mdelay(1);
1507 val = AT_READ_REG(hw, REG_MDIO_CTRL);
1508 if (!(val & (MDIO_START | MDIO_BUSY)))
1509 break;
1510 }
1511
1512 if (0 != (val & (MDIO_START | MDIO_BUSY))) {
1513 DBG("atl1e: PCI-E link down for at least 25ms\n");
1514 return ret_val;
1515 }
1516
1517 DBG("atl1e: PCI-E link up after %d ms\n", i);
1518 }
1519 return 0;
1520 }
1521
1522 int atl1e_phy_init(struct atl1e_hw *hw)
1523 {
1524 s32 ret_val;
1525 u16 phy_val;
1526
1527 if (hw->phy_configured) {
1528 if (hw->re_autoneg) {
1529 hw->re_autoneg = 0;
1530 return atl1e_restart_autoneg(hw);
1531 }
1532 return 0;
1533 }
1534
1535 /* RESET GPHY Core */
1536 AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
1537 mdelay(2);
1538 AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
1539 GPHY_CTRL_EXT_RESET);
1540 mdelay(2);
1541
1542 /* patches */
1543 /* p1. enable hibernation mode */
1544 ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
1545 if (ret_val)
1546 return ret_val;
1547 ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
1548 if (ret_val)
1549 return ret_val;
1550 /* p2. set Class A/B for all modes */
1551 ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
1552 if (ret_val)
1553 return ret_val;
1554 phy_val = 0x02ef;
1555 /* remove Class AB */
1556 /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
1557 ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
1558 if (ret_val)
1559 return ret_val;
1560 /* p3. 10B ??? */
1561 ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
1562 if (ret_val)
1563 return ret_val;
1564 ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
1565 if (ret_val)
1566 return ret_val;
1567 /* p4. 1000T power */
1568 ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
1569 if (ret_val)
1570 return ret_val;
1571 ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
1572 if (ret_val)
1573 return ret_val;
1574
1575 ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
1576 if (ret_val)
1577 return ret_val;
1578 ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
1579 if (ret_val)
1580 return ret_val;
1581
1582 mdelay(1);
1583
1584 /*Enable PHY LinkChange Interrupt */
1585 ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
1586 if (ret_val) {
1587 DBG("atl1e: Error enable PHY linkChange Interrupt\n");
1588 return ret_val;
1589 }
1590 /* setup AutoNeg parameters */
1591 ret_val = atl1e_phy_setup_autoneg_adv(hw);
1592 if (ret_val) {
1593 DBG("atl1e: Error Setting up Auto-Negotiation\n");
1594 return ret_val;
1595 }
1596 /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
1597 DBG("atl1e: Restarting Auto-Neg");
1598 ret_val = atl1e_phy_commit(hw);
1599 if (ret_val) {
1600 DBG("atl1e: Error Resetting the phy");
1601 return ret_val;
1602 }
1603
1604 hw->phy_configured = 1;
1605
1606 return 0;
1607 }
1608
1609 /*
1610 * Reset the transmit and receive units; mask and clear all interrupts.
1611 * hw - Struct containing variables accessed by shared code
1612 * return : 0 on success, or AT_ERR_TIMEOUT if the MAC fails to go idle
1613 */
1614 int atl1e_reset_hw(struct atl1e_hw *hw)
1615 {
1616 struct atl1e_adapter *adapter = hw->adapter;
1617 struct pci_device *pdev = adapter->pdev;
1618 int timeout = 0;
1619 u32 idle_status_data = 0;
1620 u16 pci_cfg_cmd_word = 0;
1621
1622 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
1623 pci_read_config_word(pdev, PCI_COMMAND, &pci_cfg_cmd_word);
1624 if ((pci_cfg_cmd_word & (PCI_COMMAND_IO | PCI_COMMAND_MEM |
1625 PCI_COMMAND_MASTER))
1626 != (PCI_COMMAND_IO | PCI_COMMAND_MEM |
1627 PCI_COMMAND_MASTER)) {
1628 pci_cfg_cmd_word |= (PCI_COMMAND_IO | PCI_COMMAND_MEM |
1629 PCI_COMMAND_MASTER);
1630 pci_write_config_word(pdev, PCI_COMMAND, pci_cfg_cmd_word);
1631 }
1632
1633 /*
1634 * Issue Soft Reset to the MAC. This will reset the chip's
1635 * transmit, receive and DMA engines. It will not affect
1636 * the current PCI configuration. The global reset bit is self-
1637 * clearing, and should clear within a microsecond.
1638 */
1639 AT_WRITE_REG(hw, REG_MASTER_CTRL,
1640 MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
1641 wmb();
1642 mdelay(1);
1643
1644 /* Wait at least 10ms for All module to be Idle */
1645 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
1646 idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
1647 if (idle_status_data == 0)
1648 break;
1649 mdelay(1);
1650 }
1651
1652 if (timeout >= AT_HW_MAX_IDLE_DELAY) {
1653 DBG("atl1e: MAC reset timeout\n");
1654 return AT_ERR_TIMEOUT;
1655 }
1656
1657 return 0;
1658 }
1659
1660
1661 /*
1662 * Performs basic configuration of the adapter.
1663 *
1664 * hw - Struct containing variables accessed by shared code
1665 * Assumes that the controller has previously been reset and is in a
1666 * post-reset uninitialized state. Initializes the multicast table
1667 * and calls routines to set up the link.
1668 * Leaves the transmit and receive units disabled and uninitialized.
1669 */
1670 int atl1e_init_hw(struct atl1e_hw *hw)
1671 {
1672 s32 ret_val = 0;
1673
1674 atl1e_init_pcie(hw);
1675
1676 /* Zero out the Multicast HASH table */
1677 /* clear the old settings from the multicast hash table */
1678 AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
1679 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
1680
1681 ret_val = atl1e_phy_init(hw);
1682
1683 return ret_val;
1684 }
1685
1686 /*
1687 * Detects the current speed and duplex settings of the hardware.
1688 *
1689 * hw - Struct containing variables accessed by shared code
1690 * speed - Speed of the connection
1691 * duplex - Duplex setting of the connection
1692 */
1693 int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
1694 {
1695 int err;
1696 u16 phy_data;
1697
1698 /* Read PHY Specific Status Register (17) */
1699 err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
1700 if (err)
1701 return err;
1702
1703 if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
1704 return AT_ERR_PHY_RES;
1705
1706 switch (phy_data & MII_AT001_PSSR_SPEED) {
1707 case MII_AT001_PSSR_1000MBS:
1708 *speed = SPEED_1000;
1709 break;
1710 case MII_AT001_PSSR_100MBS:
1711 *speed = SPEED_100;
1712 break;
1713 case MII_AT001_PSSR_10MBS:
1714 *speed = SPEED_10;
1715 break;
1716 default:
1717 return AT_ERR_PHY_SPEED;
1718 break;
1719 }
1720
1721 if (phy_data & MII_AT001_PSSR_DPLX)
1722 *duplex = FULL_DUPLEX;
1723 else
1724 *duplex = HALF_DUPLEX;
1725
1726 return 0;
1727 }
1728
1729 int atl1e_restart_autoneg(struct atl1e_hw *hw)
1730 {
1731 int err = 0;
1732
1733 err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1734 if (err)
1735 return err;
1736
1737 if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
1738 err = atl1e_write_phy_reg(hw, MII_AT001_CR,
1739 hw->mii_1000t_ctrl_reg);
1740 if (err)
1741 return err;
1742 }
1743
1744 err = atl1e_write_phy_reg(hw, MII_BMCR,
1745 MII_CR_RESET | MII_CR_AUTO_NEG_EN |
1746 MII_CR_RESTART_AUTO_NEG);
1747 return err;
1748 }
1749
1750