1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* drivers/net/ethernet/freescale/gianfar.c
3 *
4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 *
13 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14 * Copyright 2007 MontaVista Software, Inc.
15 *
16 * Gianfar: AKA Lambda Draconis, "Dragon"
17 * RA 11 31 24.2
18 * Dec +69 19 52
19 * V 3.84
20 * B-V +1.62
21 *
22 * Theory of operation
23 *
24 * The driver is initialized through of_device. Configuration information
25 * is therefore conveyed through an OF-style device tree.
26 *
27 * The Gianfar Ethernet Controller uses a ring of buffer
28 * descriptors. The beginning is indicated by a register
29 * pointing to the physical address of the start of the ring.
30 * The end is determined by a "wrap" bit being set in the
31 * last descriptor of the ring.
32 *
33 * When a packet is received, the RXF bit in the
34 * IEVENT register is set, triggering an interrupt when the
35 * corresponding bit in the IMASK register is also set (if
36 * interrupt coalescing is active, then the interrupt may not
37 * happen immediately, but will wait until either a set number
38 * of frames or a set amount of time has passed). In NAPI, the
39 * interrupt handler will signal there is work to be done, and
40 * exit. This method will start at the last known empty
41 * descriptor, and process every subsequent descriptor until there
42 * are none left with data (NAPI will stop after a set number of
43 * packets to give time to other tasks, but will eventually
44 * process all the packets). The data arrives inside a
45 * pre-allocated skb, and so after the skb is passed up to the
46 * stack, a new skb must be allocated, and the address field in
47 * the buffer descriptor must be updated to indicate this new
48 * skb.
49 *
50 * When the kernel requests that a packet be transmitted, the
51 * driver starts where it left off last time, and points the
52 * descriptor at the buffer which was passed in. The driver
53 * then informs the DMA engine that there are packets ready to
54 * be transmitted. Once the controller is finished transmitting
55 * the packet, an interrupt may be triggered (under the same
56 * conditions as for reception, but depending on the TXF bit).
57 * The driver then cleans up the buffer.
58 */
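/* Illustrative sketch (not part of the driver): the Rx ring walk described
 * above, in simplified form. Flag and field names follow the definitions
 * used later in this file (RXBD_EMPTY, RXBD_WRAP, rx_bd_base);
 * process_one_buffer() is a hypothetical placeholder for handing the data
 * to the stack and refilling the descriptor.
 *
 *	struct rxbd8 *bdp = rx_queue->rx_bd_base;
 *
 *	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY)) {
 *		process_one_buffer(rx_queue, bdp);
 *		if (be16_to_cpu(bdp->status) & RXBD_WRAP)
 *			bdp = rx_queue->rx_bd_base;	wrap bit: back to start
 *		else
 *			bdp++;
 *	}
 */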
59
60 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61
62 #include <linux/kernel.h>
63 #include <linux/string.h>
64 #include <linux/errno.h>
65 #include <linux/unistd.h>
66 #include <linux/slab.h>
67 #include <linux/interrupt.h>
68 #include <linux/delay.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/skbuff.h>
72 #include <linux/if_vlan.h>
73 #include <linux/spinlock.h>
74 #include <linux/mm.h>
75 #include <linux/of_address.h>
76 #include <linux/of_irq.h>
77 #include <linux/of_mdio.h>
78 #include <linux/of_platform.h>
79 #include <linux/ip.h>
80 #include <linux/tcp.h>
81 #include <linux/udp.h>
82 #include <linux/in.h>
83 #include <linux/net_tstamp.h>
84
85 #include <asm/io.h>
86 #ifdef CONFIG_PPC
87 #include <asm/reg.h>
88 #include <asm/mpc85xx.h>
89 #endif
90 #include <asm/irq.h>
91 #include <linux/uaccess.h>
92 #include <linux/module.h>
93 #include <linux/dma-mapping.h>
94 #include <linux/crc32.h>
95 #include <linux/mii.h>
96 #include <linux/phy.h>
97 #include <linux/phy_fixed.h>
98 #include <linux/of.h>
99 #include <linux/of_net.h>
100
101 #include "gianfar.h"
102
103 #define TX_TIMEOUT (5*HZ)
104
105 MODULE_AUTHOR("Freescale Semiconductor, Inc");
106 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
107 MODULE_LICENSE("GPL");
108
109 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
110 dma_addr_t buf)
111 {
112 u32 lstatus;
113
114 bdp->bufPtr = cpu_to_be32(buf);
115
116 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
117 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
118 lstatus |= BD_LFLAG(RXBD_WRAP);
119
120 gfar_wmb();
121
122 bdp->lstatus = cpu_to_be32(lstatus);
123 }
124
125 static void gfar_init_tx_rx_base(struct gfar_private *priv)
126 {
127 struct gfar __iomem *regs = priv->gfargrp[0].regs;
128 u32 __iomem *baddr;
129 int i;
130
131 baddr = &regs->tbase0;
132 for (i = 0; i < priv->num_tx_queues; i++) {
133 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
134 baddr += 2;
135 }
136
137 baddr = &regs->rbase0;
138 for (i = 0; i < priv->num_rx_queues; i++) {
139 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
140 baddr += 2;
141 }
142 }
143
144 static void gfar_init_rqprm(struct gfar_private *priv)
145 {
146 struct gfar __iomem *regs = priv->gfargrp[0].regs;
147 u32 __iomem *baddr;
148 int i;
149
150 baddr = &regs->rqprm0;
151 for (i = 0; i < priv->num_rx_queues; i++) {
152 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
153 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
154 baddr++;
155 }
156 }
157
158 static void gfar_rx_offload_en(struct gfar_private *priv)
159 {
160 /* set this when rx hw offload (TOE) functions are being used */
161 priv->uses_rxfcb = 0;
162
163 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
164 priv->uses_rxfcb = 1;
165
166 if (priv->hwts_rx_en || priv->rx_filer_enable)
167 priv->uses_rxfcb = 1;
168 }
169
170 static void gfar_mac_rx_config(struct gfar_private *priv)
171 {
172 struct gfar __iomem *regs = priv->gfargrp[0].regs;
173 u32 rctrl = 0;
174
175 if (priv->rx_filer_enable) {
176 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
177 /* Program the RIR0 reg with the required distribution */
178 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
179 }
180
181 /* Restore PROMISC mode */
182 if (priv->ndev->flags & IFF_PROMISC)
183 rctrl |= RCTRL_PROM;
184
185 if (priv->ndev->features & NETIF_F_RXCSUM)
186 rctrl |= RCTRL_CHECKSUMMING;
187
188 if (priv->extended_hash)
189 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
190
191 if (priv->padding) {
192 rctrl &= ~RCTRL_PAL_MASK;
193 rctrl |= RCTRL_PADDING(priv->padding);
194 }
195
196 /* Enable HW time stamping if requested from user space */
197 if (priv->hwts_rx_en)
198 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
199
200 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
201 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
202
203 /* Clear the LFC bit */
204 gfar_write(&regs->rctrl, rctrl);
205 /* Init flow control threshold values */
206 gfar_init_rqprm(priv);
207 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
208 rctrl |= RCTRL_LFC;
209 
210 /* Init rctrl based on our settings */
211 gfar_write(&regs->rctrl, rctrl);
212 }
213
214 static void gfar_mac_tx_config(struct gfar_private *priv)
215 {
216 struct gfar __iomem *regs = priv->gfargrp[0].regs;
217 u32 tctrl = 0;
218
219 if (priv->ndev->features & NETIF_F_IP_CSUM)
220 tctrl |= TCTRL_INIT_CSUM;
221
222 if (priv->prio_sched_en)
223 tctrl |= TCTRL_TXSCHED_PRIO;
224 else {
225 tctrl |= TCTRL_TXSCHED_WRRS;
226 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
227 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
228 }
229
230 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
231 tctrl |= TCTRL_VLINS;
232
233 gfar_write(&regs->tctrl, tctrl);
234 }
235
236 static void gfar_configure_coalescing(struct gfar_private *priv,
237 unsigned long tx_mask, unsigned long rx_mask)
238 {
239 struct gfar __iomem *regs = priv->gfargrp[0].regs;
240 u32 __iomem *baddr;
241
242 if (priv->mode == MQ_MG_MODE) {
243 int i = 0;
244
245 baddr = &regs->txic0;
246 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
247 gfar_write(baddr + i, 0);
248 if (likely(priv->tx_queue[i]->txcoalescing))
249 gfar_write(baddr + i, priv->tx_queue[i]->txic);
250 }
251
252 baddr = &regs->rxic0;
253 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
254 gfar_write(baddr + i, 0);
255 if (likely(priv->rx_queue[i]->rxcoalescing))
256 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
257 }
258 } else {
259 /* Backward compatible case -- even if we enable
260 * multiple queues, there's only a single register to program
261 */
262 gfar_write(&regs->txic, 0);
263 if (likely(priv->tx_queue[0]->txcoalescing))
264 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
265 
266 gfar_write(&regs->rxic, 0);
267 if (unlikely(priv->rx_queue[0]->rxcoalescing))
268 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
269 }
270 }
271
272 static void gfar_configure_coalescing_all(struct gfar_private *priv)
273 {
274 gfar_configure_coalescing(priv, 0xFF, 0xFF);
275 }
276
277 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
278 {
279 struct gfar_private *priv = netdev_priv(dev);
280 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
281 unsigned long tx_packets = 0, tx_bytes = 0;
282 int i;
283
284 for (i = 0; i < priv->num_rx_queues; i++) {
285 rx_packets += priv->rx_queue[i]->stats.rx_packets;
286 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
287 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
288 }
289
290 dev->stats.rx_packets = rx_packets;
291 dev->stats.rx_bytes = rx_bytes;
292 dev->stats.rx_dropped = rx_dropped;
293
294 for (i = 0; i < priv->num_tx_queues; i++) {
295 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
296 tx_packets += priv->tx_queue[i]->stats.tx_packets;
297 }
298
299 dev->stats.tx_bytes = tx_bytes;
300 dev->stats.tx_packets = tx_packets;
301
302 return &dev->stats;
303 }
304
305 /* Set the appropriate hash bit for the given addr */
306 /* The algorithm works like so:
307 * 1) Take the Destination Address (ie the multicast address), and
308 * do a CRC on it (little endian), and reverse the bits of the
309 * result.
310 * 2) Use the 8 most significant bits as a hash into a 256-entry
311 * table. The table is controlled through 8 32-bit registers:
312 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
313 * entry 255. This means that the 3 most significant bits of the
314 * hash index select which gaddr register to use, and the 5 other bits
315 * indicate which bit (assuming an IBM numbering scheme, which
316 * for PowerPC (tm) is usually the case) in the register holds
317 * the entry.
318 */
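/* Worked example (illustrative, with hash_width = 8, i.e. no extended
 * hash): if ether_crc() returns 0xB4000000, then
 *   whichreg = 0xB4000000 >> 29          = 5    -> gaddr5
 *   whichbit = (0xB4000000 >> 24) & 0x1f = 0x14 = 20
 * so bit 20 (IBM numbering, i.e. 1 << (31 - 20)) of gaddr5 is set.
 * With the extended hash (hash_width = 9) the top 4 bits select one of
 * the 16 registers igaddr0-7/gaddr0-7 instead.
 */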
319 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
320 {
321 u32 tempval;
322 struct gfar_private *priv = netdev_priv(dev);
323 u32 result = ether_crc(ETH_ALEN, addr);
324 int width = priv->hash_width;
325 u8 whichbit = (result >> (32 - width)) & 0x1f;
326 u8 whichreg = result >> (32 - width + 5);
327 u32 value = (1 << (31-whichbit));
328
329 tempval = gfar_read(priv->hash_regs[whichreg]);
330 tempval |= value;
331 gfar_write(priv->hash_regs[whichreg], tempval);
332 }
333
334 /* There are multiple MAC Address register pairs on some controllers
335 * This function sets the numth pair to a given address
336 */
337 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
338 const u8 *addr)
339 {
340 struct gfar_private *priv = netdev_priv(dev);
341 struct gfar __iomem *regs = priv->gfargrp[0].regs;
342 u32 tempval;
343 u32 __iomem *macptr = &regs->macstnaddr1;
344
345 macptr += num*2;
346
347 /* For a station address of 0x12345678ABCD in transmission
348 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
349 * MACnADDR2 is set to 0x34120000.
350 */
351 tempval = (addr[5] << 24) | (addr[4] << 16) |
352 (addr[3] << 8) | addr[2];
353
354 gfar_write(macptr, tempval);
355
356 tempval = (addr[1] << 24) | (addr[0] << 16);
357
358 gfar_write(macptr+1, tempval);
359 }
360
361 static int gfar_set_mac_addr(struct net_device *dev, void *p)
362 {
363 int ret;
364
365 ret = eth_mac_addr(dev, p);
366 if (ret)
367 return ret;
368
369 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
370
371 return 0;
372 }
373
374 static void gfar_ints_disable(struct gfar_private *priv)
375 {
376 int i;
377 for (i = 0; i < priv->num_grps; i++) {
378 struct gfar __iomem *regs = priv->gfargrp[i].regs;
379 /* Clear IEVENT */
380 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
381 
382 /* Initialize IMASK */
383 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
384 }
385 }
386
387 static void gfar_ints_enable(struct gfar_private *priv)
388 {
389 int i;
390 for (i = 0; i < priv->num_grps; i++) {
391 struct gfar __iomem *regs = priv->gfargrp[i].regs;
392 /* Unmask the interrupts we look for */
393 gfar_write(&regs->imask, IMASK_DEFAULT);
394 }
395 }
396
397 static int gfar_alloc_tx_queues(struct gfar_private *priv)
398 {
399 int i;
400
401 for (i = 0; i < priv->num_tx_queues; i++) {
402 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
403 GFP_KERNEL);
404 if (!priv->tx_queue[i])
405 return -ENOMEM;
406
407 priv->tx_queue[i]->tx_skbuff = NULL;
408 priv->tx_queue[i]->qindex = i;
409 priv->tx_queue[i]->dev = priv->ndev;
410 spin_lock_init(&(priv->tx_queue[i]->txlock));
411 }
412 return 0;
413 }
414
415 static int gfar_alloc_rx_queues(struct gfar_private *priv)
416 {
417 int i;
418
419 for (i = 0; i < priv->num_rx_queues; i++) {
420 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
421 GFP_KERNEL);
422 if (!priv->rx_queue[i])
423 return -ENOMEM;
424
425 priv->rx_queue[i]->qindex = i;
426 priv->rx_queue[i]->ndev = priv->ndev;
427 }
428 return 0;
429 }
430
431 static void gfar_free_tx_queues(struct gfar_private *priv)
432 {
433 int i;
434
435 for (i = 0; i < priv->num_tx_queues; i++)
436 kfree(priv->tx_queue[i]);
437 }
438
439 static void gfar_free_rx_queues(struct gfar_private *priv)
440 {
441 int i;
442
443 for (i = 0; i < priv->num_rx_queues; i++)
444 kfree(priv->rx_queue[i]);
445 }
446
447 static void unmap_group_regs(struct gfar_private *priv)
448 {
449 int i;
450
451 for (i = 0; i < MAXGROUPS; i++)
452 if (priv->gfargrp[i].regs)
453 iounmap(priv->gfargrp[i].regs);
454 }
455
456 static void free_gfar_dev(struct gfar_private *priv)
457 {
458 int i, j;
459
460 for (i = 0; i < priv->num_grps; i++)
461 for (j = 0; j < GFAR_NUM_IRQS; j++) {
462 kfree(priv->gfargrp[i].irqinfo[j]);
463 priv->gfargrp[i].irqinfo[j] = NULL;
464 }
465
466 free_netdev(priv->ndev);
467 }
468
469 static void disable_napi(struct gfar_private *priv)
470 {
471 int i;
472
473 for (i = 0; i < priv->num_grps; i++) {
474 napi_disable(&priv->gfargrp[i].napi_rx);
475 napi_disable(&priv->gfargrp[i].napi_tx);
476 }
477 }
478
479 static void enable_napi(struct gfar_private *priv)
480 {
481 int i;
482
483 for (i = 0; i < priv->num_grps; i++) {
484 napi_enable(&priv->gfargrp[i].napi_rx);
485 napi_enable(&priv->gfargrp[i].napi_tx);
486 }
487 }
488
489 static int gfar_parse_group(struct device_node *np,
490 struct gfar_private *priv, const char *model)
491 {
492 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
493 int i;
494
495 for (i = 0; i < GFAR_NUM_IRQS; i++) {
496 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
497 GFP_KERNEL);
498 if (!grp->irqinfo[i])
499 return -ENOMEM;
500 }
501
502 grp->regs = of_iomap(np, 0);
503 if (!grp->regs)
504 return -ENOMEM;
505
506 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
507
508 /* If we aren't the FEC we have multiple interrupts */
509 if (model && strcasecmp(model, "FEC")) {
510 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
511 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
512 if (!gfar_irq(grp, TX)->irq ||
513 !gfar_irq(grp, RX)->irq ||
514 !gfar_irq(grp, ER)->irq)
515 return -EINVAL;
516 }
517
518 grp->priv = priv;
519 spin_lock_init(&grp->grplock);
520 if (priv->mode == MQ_MG_MODE) {
521 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
522 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
523 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
524 } else {
525 grp->rx_bit_map = 0xFF;
526 grp->tx_bit_map = 0xFF;
527 }
528
529 /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
530 * right to left, so we need to reverse the 8 bits to get the q index
531 */
532 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
533 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
534
535 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
536 * also assign queues to groups
537 */
538 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
539 if (!grp->rx_queue)
540 grp->rx_queue = priv->rx_queue[i];
541 grp->num_rx_queues++;
542 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
543 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
544 priv->rx_queue[i]->grp = grp;
545 }
546
547 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
548 if (!grp->tx_queue)
549 grp->tx_queue = priv->tx_queue[i];
550 grp->num_tx_queues++;
551 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
552 priv->tqueue |= (TQUEUE_EN0 >> i);
553 priv->tx_queue[i]->grp = grp;
554 }
555
556 priv->num_grps++;
557
558 return 0;
559 }
560
561 static int gfar_of_group_count(struct device_node *np)
562 {
563 struct device_node *child;
564 int num = 0;
565
566 for_each_available_child_of_node(np, child)
567 if (of_node_name_eq(child, "queue-group"))
568 num++;
569
570 return num;
571 }
572
573 /* Reads the controller's registers to determine what interface
574 * connects it to the PHY.
575 */
576 static phy_interface_t gfar_get_interface(struct net_device *dev)
577 {
578 struct gfar_private *priv = netdev_priv(dev);
579 struct gfar __iomem *regs = priv->gfargrp[0].regs;
580 u32 ecntrl;
581
582 ecntrl = gfar_read(&regs->ecntrl);
583
584 if (ecntrl & ECNTRL_SGMII_MODE)
585 return PHY_INTERFACE_MODE_SGMII;
586
587 if (ecntrl & ECNTRL_TBI_MODE) {
588 if (ecntrl & ECNTRL_REDUCED_MODE)
589 return PHY_INTERFACE_MODE_RTBI;
590 else
591 return PHY_INTERFACE_MODE_TBI;
592 }
593
594 if (ecntrl & ECNTRL_REDUCED_MODE) {
595 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
596 return PHY_INTERFACE_MODE_RMII;
597 }
598 else {
599 phy_interface_t interface = priv->interface;
600
601 /* This isn't autodetected right now, so it must
602 * be set by the device tree or platform code.
603 */
604 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
605 return PHY_INTERFACE_MODE_RGMII_ID;
606
607 return PHY_INTERFACE_MODE_RGMII;
608 }
609 }
610
611 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
612 return PHY_INTERFACE_MODE_GMII;
613
614 return PHY_INTERFACE_MODE_MII;
615 }
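/* Summary of the ECNTRL decoding above (illustrative):
 *   SGMII_MODE                      -> SGMII
 *   TBI_MODE + REDUCED_MODE         -> RTBI
 *   TBI_MODE                        -> TBI
 *   REDUCED_MODE + REDUCED_MII_MODE -> RMII
 *   REDUCED_MODE                    -> RGMII (or RGMII_ID from DT/platform)
 *   otherwise                       -> GMII if gigabit-capable, else MII
 */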
616
617 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
618 {
619 const char *model;
620 int err = 0, i;
621 phy_interface_t interface;
622 struct net_device *dev = NULL;
623 struct gfar_private *priv = NULL;
624 struct device_node *np = ofdev->dev.of_node;
625 struct device_node *child = NULL;
626 u32 stash_len = 0;
627 u32 stash_idx = 0;
628 unsigned int num_tx_qs, num_rx_qs;
629 unsigned short mode;
630
631 if (!np)
632 return -ENODEV;
633
634 if (of_device_is_compatible(np, "fsl,etsec2"))
635 mode = MQ_MG_MODE;
636 else
637 mode = SQ_SG_MODE;
638
639 if (mode == SQ_SG_MODE) {
640 num_tx_qs = 1;
641 num_rx_qs = 1;
642 } else { /* MQ_MG_MODE */
643 /* get the actual number of supported groups */
644 unsigned int num_grps = gfar_of_group_count(np);
645
646 if (num_grps == 0 || num_grps > MAXGROUPS) {
647 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
648 num_grps);
649 pr_err("Cannot do alloc_etherdev, aborting\n");
650 return -EINVAL;
651 }
652
653 num_tx_qs = num_grps; /* one txq per int group */
654 num_rx_qs = num_grps; /* one rxq per int group */
655 }
656
657 if (num_tx_qs > MAX_TX_QS) {
658 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
659 num_tx_qs, MAX_TX_QS);
660 pr_err("Cannot do alloc_etherdev, aborting\n");
661 return -EINVAL;
662 }
663
664 if (num_rx_qs > MAX_RX_QS) {
665 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
666 num_rx_qs, MAX_RX_QS);
667 pr_err("Cannot do alloc_etherdev, aborting\n");
668 return -EINVAL;
669 }
670
671 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
672 dev = *pdev;
673 if (NULL == dev)
674 return -ENOMEM;
675
676 priv = netdev_priv(dev);
677 priv->ndev = dev;
678
679 priv->mode = mode;
680
681 priv->num_tx_queues = num_tx_qs;
682 netif_set_real_num_rx_queues(dev, num_rx_qs);
683 priv->num_rx_queues = num_rx_qs;
684
685 err = gfar_alloc_tx_queues(priv);
686 if (err)
687 goto tx_alloc_failed;
688
689 err = gfar_alloc_rx_queues(priv);
690 if (err)
691 goto rx_alloc_failed;
692
693 err = of_property_read_string(np, "model", &model);
694 if (err) {
695 pr_err("Device model property missing, aborting\n");
696 goto rx_alloc_failed;
697 }
698
699 /* Init Rx queue filer rule set linked list */
700 INIT_LIST_HEAD(&priv->rx_list.list);
701 priv->rx_list.count = 0;
702 mutex_init(&priv->rx_queue_access);
703
704 for (i = 0; i < MAXGROUPS; i++)
705 priv->gfargrp[i].regs = NULL;
706
707 /* Parse and initialize group specific information */
708 if (priv->mode == MQ_MG_MODE) {
709 for_each_available_child_of_node(np, child) {
710 if (!of_node_name_eq(child, "queue-group"))
711 continue;
712
713 err = gfar_parse_group(child, priv, model);
714 if (err) {
715 of_node_put(child);
716 goto err_grp_init;
717 }
718 }
719 } else { /* SQ_SG_MODE */
720 err = gfar_parse_group(np, priv, model);
721 if (err)
722 goto err_grp_init;
723 }
724
725 if (of_property_read_bool(np, "bd-stash")) {
726 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
727 priv->bd_stash_en = 1;
728 }
729
730 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
731
732 if (err == 0)
733 priv->rx_stash_size = stash_len;
734
735 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
736
737 if (err == 0)
738 priv->rx_stash_index = stash_idx;
739
740 if (stash_len || stash_idx)
741 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
742
743 err = of_get_mac_address(np, dev->dev_addr);
744 if (err) {
745 eth_hw_addr_random(dev);
746 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
747 }
748
749 if (model && !strcasecmp(model, "TSEC"))
750 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
751 FSL_GIANFAR_DEV_HAS_COALESCE |
752 FSL_GIANFAR_DEV_HAS_RMON |
753 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
754
755 if (model && !strcasecmp(model, "eTSEC"))
756 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
757 FSL_GIANFAR_DEV_HAS_COALESCE |
758 FSL_GIANFAR_DEV_HAS_RMON |
759 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
760 FSL_GIANFAR_DEV_HAS_CSUM |
761 FSL_GIANFAR_DEV_HAS_VLAN |
762 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
763 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
764 FSL_GIANFAR_DEV_HAS_TIMER |
765 FSL_GIANFAR_DEV_HAS_RX_FILER;
766
767 /* Use PHY connection type from the DT node if one is specified there.
768 * rgmii-id really needs to be specified. Other types can be
769 * detected by hardware
770 */
771 err = of_get_phy_mode(np, &interface);
772 if (!err)
773 priv->interface = interface;
774 else
775 priv->interface = gfar_get_interface(dev);
776
777 if (of_find_property(np, "fsl,magic-packet", NULL))
778 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
779
780 if (of_get_property(np, "fsl,wake-on-filer", NULL))
781 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
782
783 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
784
785 /* In the case of a fixed PHY, the DT node associated
786 * to the PHY is the Ethernet MAC DT node.
787 */
788 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
789 err = of_phy_register_fixed_link(np);
790 if (err)
791 goto err_grp_init;
792
793 priv->phy_node = of_node_get(np);
794 }
795
796 /* Find the TBI PHY. If it's not there, we don't support SGMII */
797 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
798
799 return 0;
800
801 err_grp_init:
802 unmap_group_regs(priv);
803 rx_alloc_failed:
804 gfar_free_rx_queues(priv);
805 tx_alloc_failed:
806 gfar_free_tx_queues(priv);
807 free_gfar_dev(priv);
808 return err;
809 }
810
811 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
812 u32 class)
813 {
814 u32 rqfpr = FPR_FILER_MASK;
815 u32 rqfcr = 0x0;
816
817 rqfar--;
818 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
819 priv->ftp_rqfpr[rqfar] = rqfpr;
820 priv->ftp_rqfcr[rqfar] = rqfcr;
821 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
822
823 rqfar--;
824 rqfcr = RQFCR_CMP_NOMATCH;
825 priv->ftp_rqfpr[rqfar] = rqfpr;
826 priv->ftp_rqfcr[rqfar] = rqfcr;
827 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
828
829 rqfar--;
830 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
831 rqfpr = class;
832 priv->ftp_rqfcr[rqfar] = rqfcr;
833 priv->ftp_rqfpr[rqfar] = rqfpr;
834 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
835
836 rqfar--;
837 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
838 rqfpr = class;
839 priv->ftp_rqfcr[rqfar] = rqfcr;
840 priv->ftp_rqfpr[rqfar] = rqfpr;
841 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
842
843 return rqfar;
844 }
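/* Note: each cluster_entry_per_class() call programs four filer entries at
 * indices rqfar-1 .. rqfar-4 and returns the decremented index, so
 * gfar_init_filer_table() below can chain calls downward from
 * MAX_FILER_IDX, one cluster per traffic class.
 */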
845
846 static void gfar_init_filer_table(struct gfar_private *priv)
847 {
848 int i = 0x0;
849 u32 rqfar = MAX_FILER_IDX;
850 u32 rqfcr = 0x0;
851 u32 rqfpr = FPR_FILER_MASK;
852
853 /* Default rule */
854 rqfcr = RQFCR_CMP_MATCH;
855 priv->ftp_rqfcr[rqfar] = rqfcr;
856 priv->ftp_rqfpr[rqfar] = rqfpr;
857 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
858
859 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
860 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
861 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
862 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
863 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
864 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
865
866 /* cur_filer_idx indicates the first non-masked rule */
867 priv->cur_filer_idx = rqfar;
868
869 /* Rest are masked rules */
870 rqfcr = RQFCR_CMP_NOMATCH;
871 for (i = 0; i < rqfar; i++) {
872 priv->ftp_rqfcr[i] = rqfcr;
873 priv->ftp_rqfpr[i] = rqfpr;
874 gfar_write_filer(priv, i, rqfcr, rqfpr);
875 }
876 }
877
878 #ifdef CONFIG_PPC
879 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
880 {
881 unsigned int pvr = mfspr(SPRN_PVR);
882 unsigned int svr = mfspr(SPRN_SVR);
883 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
884 unsigned int rev = svr & 0xffff;
885
886 /* MPC8313 Rev 2.0 and higher; All MPC837x */
887 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
888 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
889 priv->errata |= GFAR_ERRATA_74;
890
891 /* MPC8313 and MPC837x all rev */
892 if ((pvr == 0x80850010 && mod == 0x80b0) ||
893 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
894 priv->errata |= GFAR_ERRATA_76;
895
896 /* MPC8313 Rev < 2.0 */
897 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
898 priv->errata |= GFAR_ERRATA_12;
899 }
900
901 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
902 {
903 unsigned int svr = mfspr(SPRN_SVR);
904
905 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
906 priv->errata |= GFAR_ERRATA_12;
907 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
908 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
909 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
910 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
911 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
912 }
913 #endif
914
915 static void gfar_detect_errata(struct gfar_private *priv)
916 {
917 struct device *dev = &priv->ofdev->dev;
918
919 /* no plans to fix */
920 priv->errata |= GFAR_ERRATA_A002;
921
922 #ifdef CONFIG_PPC
923 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
924 __gfar_detect_errata_85xx(priv);
925 else /* non-mpc85xx parts, i.e. e300 core based */
926 __gfar_detect_errata_83xx(priv);
927 #endif
928
929 if (priv->errata)
930 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
931 priv->errata);
932 }
933
934 static void gfar_init_addr_hash_table(struct gfar_private *priv)
935 {
936 struct gfar __iomem *regs = priv->gfargrp[0].regs;
937
938 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
939 priv->extended_hash = 1;
940 priv->hash_width = 9;
941
942 priv->hash_regs[0] = &regs->igaddr0;
943 priv->hash_regs[1] = &regs->igaddr1;
944 priv->hash_regs[2] = &regs->igaddr2;
945 priv->hash_regs[3] = &regs->igaddr3;
946 priv->hash_regs[4] = &regs->igaddr4;
947 priv->hash_regs[5] = &regs->igaddr5;
948 priv->hash_regs[6] = &regs->igaddr6;
949 priv->hash_regs[7] = &regs->igaddr7;
950 priv->hash_regs[8] = &regs->gaddr0;
951 priv->hash_regs[9] = &regs->gaddr1;
952 priv->hash_regs[10] = &regs->gaddr2;
953 priv->hash_regs[11] = &regs->gaddr3;
954 priv->hash_regs[12] = &regs->gaddr4;
955 priv->hash_regs[13] = &regs->gaddr5;
956 priv->hash_regs[14] = &regs->gaddr6;
957 priv->hash_regs[15] = &regs->gaddr7;
958
959 } else {
960 priv->extended_hash = 0;
961 priv->hash_width = 8;
962
963 priv->hash_regs[0] = &regs->gaddr0;
964 priv->hash_regs[1] = &regs->gaddr1;
965 priv->hash_regs[2] = &regs->gaddr2;
966 priv->hash_regs[3] = &regs->gaddr3;
967 priv->hash_regs[4] = &regs->gaddr4;
968 priv->hash_regs[5] = &regs->gaddr5;
969 priv->hash_regs[6] = &regs->gaddr6;
970 priv->hash_regs[7] = &regs->gaddr7;
971 }
972 }
973
974 static int __gfar_is_rx_idle(struct gfar_private *priv)
975 {
976 u32 res;
977
978 /* Normally TSEC should not hang on GRS commands, so we should
979 * actually wait for IEVENT_GRSC flag.
980 */
981 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
982 return 0;
983
984 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
985 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
986 * and the Rx can be safely reset.
987 */
988 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
989 res &= 0x7f807f80;
990 if ((res & 0xffff) == (res >> 16))
991 return 1;
992
993 return 0;
994 }
995
996 /* Halt the receive and transmit queues */
997 static void gfar_halt_nodisable(struct gfar_private *priv)
998 {
999 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1000 u32 tempval;
1001 unsigned int timeout;
1002 int stopped;
1003
1004 gfar_ints_disable(priv);
1005
1006 if (gfar_is_dma_stopped(priv))
1007 return;
1008
1009 /* Stop the DMA, and wait for it to stop */
1010 tempval = gfar_read(&regs->dmactrl);
1011 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1012 gfar_write(&regs->dmactrl, tempval);
1013
1014 retry:
1015 timeout = 1000;
1016 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1017 cpu_relax();
1018 timeout--;
1019 }
1020
1021 if (!timeout)
1022 stopped = gfar_is_dma_stopped(priv);
1023
1024 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1025 !__gfar_is_rx_idle(priv))
1026 goto retry;
1027 }
1028
1029 /* Halt the receive and transmit queues */
1030 static void gfar_halt(struct gfar_private *priv)
1031 {
1032 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1033 u32 tempval;
1034
1035 /* Disable the Rx/Tx hw queues */
1036 gfar_write(&regs->rqueue, 0);
1037 gfar_write(&regs->tqueue, 0);
1038
1039 mdelay(10);
1040
1041 gfar_halt_nodisable(priv);
1042
1043 /* Disable Rx/Tx DMA */
1044 tempval = gfar_read(&regs->maccfg1);
1045 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1046 gfar_write(&regs->maccfg1, tempval);
1047 }
1048
1049 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1050 {
1051 struct txbd8 *txbdp;
1052 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1053 int i, j;
1054
1055 txbdp = tx_queue->tx_bd_base;
1056
1057 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1058 if (!tx_queue->tx_skbuff[i])
1059 continue;
1060
1061 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1062 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1063 txbdp->lstatus = 0;
1064 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1065 j++) {
1066 txbdp++;
1067 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1068 be16_to_cpu(txbdp->length),
1069 DMA_TO_DEVICE);
1070 }
1071 txbdp++;
1072 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1073 tx_queue->tx_skbuff[i] = NULL;
1074 }
1075 kfree(tx_queue->tx_skbuff);
1076 tx_queue->tx_skbuff = NULL;
1077 }
1078
1079 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1080 {
1081 int i;
1082
1083 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1084
1085 dev_kfree_skb(rx_queue->skb);
1086
1087 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1088 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1089
1090 rxbdp->lstatus = 0;
1091 rxbdp->bufPtr = 0;
1092 rxbdp++;
1093
1094 if (!rxb->page)
1095 continue;
1096
1097 dma_unmap_page(rx_queue->dev, rxb->dma,
1098 PAGE_SIZE, DMA_FROM_DEVICE);
1099 __free_page(rxb->page);
1100
1101 rxb->page = NULL;
1102 }
1103
1104 kfree(rx_queue->rx_buff);
1105 rx_queue->rx_buff = NULL;
1106 }
1107
1108 /* If there are any tx skbs or rx skbs still around, free them.
1109 * Then free tx_skbuff and rx_skbuff
1110 */
1111 static void free_skb_resources(struct gfar_private *priv)
1112 {
1113 struct gfar_priv_tx_q *tx_queue = NULL;
1114 struct gfar_priv_rx_q *rx_queue = NULL;
1115 int i;
1116
1117 /* Go through all the buffer descriptors and free their data buffers */
1118 for (i = 0; i < priv->num_tx_queues; i++) {
1119 struct netdev_queue *txq;
1120
1121 tx_queue = priv->tx_queue[i];
1122 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1123 if (tx_queue->tx_skbuff)
1124 free_skb_tx_queue(tx_queue);
1125 netdev_tx_reset_queue(txq);
1126 }
1127
1128 for (i = 0; i < priv->num_rx_queues; i++) {
1129 rx_queue = priv->rx_queue[i];
1130 if (rx_queue->rx_buff)
1131 free_skb_rx_queue(rx_queue);
1132 }
1133
1134 dma_free_coherent(priv->dev,
1135 sizeof(struct txbd8) * priv->total_tx_ring_size +
1136 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1137 priv->tx_queue[0]->tx_bd_base,
1138 priv->tx_queue[0]->tx_bd_dma_base);
1139 }
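/* Note: all Tx and Rx BD rings share a single coherent allocation (see
 * gfar_alloc_skb_resources()), which is why the one dma_free_coherent()
 * call above, anchored at tx_queue[0]'s base, releases every ring at once.
 */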
1140
1141 void stop_gfar(struct net_device *dev)
1142 {
1143 struct gfar_private *priv = netdev_priv(dev);
1144
1145 netif_tx_stop_all_queues(dev);
1146
1147 smp_mb__before_atomic();
1148 set_bit(GFAR_DOWN, &priv->state);
1149 smp_mb__after_atomic();
1150
1151 disable_napi(priv);
1152
1153 /* disable ints and gracefully shut down Rx/Tx DMA */
1154 gfar_halt(priv);
1155
1156 phy_stop(dev->phydev);
1157
1158 free_skb_resources(priv);
1159 }
1160
1161 static void gfar_start(struct gfar_private *priv)
1162 {
1163 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1164 u32 tempval;
1165 int i = 0;
1166
1167 /* Enable Rx/Tx hw queues */
1168 gfar_write(&regs->rqueue, priv->rqueue);
1169 gfar_write(&regs->tqueue, priv->tqueue);
1170
1171 /* Initialize DMACTRL to have WWR and WOP */
1172 tempval = gfar_read(&regs->dmactrl);
1173 tempval |= DMACTRL_INIT_SETTINGS;
1174 gfar_write(&regs->dmactrl, tempval);
1175 
1176 /* Make sure we aren't stopped */
1177 tempval = gfar_read(&regs->dmactrl);
1178 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1179 gfar_write(&regs->dmactrl, tempval);
1180
1181 for (i = 0; i < priv->num_grps; i++) {
1182 regs = priv->gfargrp[i].regs;
1183 /* Clear THLT/RHLT, so that the DMA starts polling now */
1184 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1185 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1186 }
1187
1188 /* Enable Rx/Tx DMA */
1189 tempval = gfar_read(&regs->maccfg1);
1190 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1191 gfar_write(&regs->maccfg1, tempval);
1192
1193 gfar_ints_enable(priv);
1194
1195 netif_trans_update(priv->ndev); /* prevent tx timeout */
1196 }
1197
1198 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1199 {
1200 struct page *page;
1201 dma_addr_t addr;
1202
1203 page = dev_alloc_page();
1204 if (unlikely(!page))
1205 return false;
1206
1207 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1208 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1209 __free_page(page);
1210
1211 return false;
1212 }
1213
1214 rxb->dma = addr;
1215 rxb->page = page;
1216 rxb->page_offset = 0;
1217
1218 return true;
1219 }
1220
1221 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1222 {
1223 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1224 struct gfar_extra_stats *estats = &priv->extra_stats;
1225
1226 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1227 atomic64_inc(&estats->rx_alloc_err);
1228 }
1229
1230 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1231 int alloc_cnt)
1232 {
1233 struct rxbd8 *bdp;
1234 struct gfar_rx_buff *rxb;
1235 int i;
1236
1237 i = rx_queue->next_to_use;
1238 bdp = &rx_queue->rx_bd_base[i];
1239 rxb = &rx_queue->rx_buff[i];
1240
1241 while (alloc_cnt--) {
1242 /* try reuse page */
1243 if (unlikely(!rxb->page)) {
1244 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1245 gfar_rx_alloc_err(rx_queue);
1246 break;
1247 }
1248 }
1249
1250 /* Setup the new RxBD */
1251 gfar_init_rxbdp(rx_queue, bdp,
1252 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1253
1254 /* Update to the next pointer */
1255 bdp++;
1256 rxb++;
1257
1258 if (unlikely(++i == rx_queue->rx_ring_size)) {
1259 i = 0;
1260 bdp = rx_queue->rx_bd_base;
1261 rxb = rx_queue->rx_buff;
1262 }
1263 }
1264
1265 rx_queue->next_to_use = i;
1266 rx_queue->next_to_alloc = i;
1267 }
1268
1269 static void gfar_init_bds(struct net_device *ndev)
1270 {
1271 struct gfar_private *priv = netdev_priv(ndev);
1272 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1273 struct gfar_priv_tx_q *tx_queue = NULL;
1274 struct gfar_priv_rx_q *rx_queue = NULL;
1275 struct txbd8 *txbdp;
1276 u32 __iomem *rfbptr;
1277 int i, j;
1278
1279 for (i = 0; i < priv->num_tx_queues; i++) {
1280 tx_queue = priv->tx_queue[i];
1281 /* Initialize some variables in our dev structure */
1282 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1283 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1284 tx_queue->cur_tx = tx_queue->tx_bd_base;
1285 tx_queue->skb_curtx = 0;
1286 tx_queue->skb_dirtytx = 0;
1287
1288 /* Initialize Transmit Descriptor Ring */
1289 txbdp = tx_queue->tx_bd_base;
1290 for (j = 0; j < tx_queue->tx_ring_size; j++) {
1291 txbdp->lstatus = 0;
1292 txbdp->bufPtr = 0;
1293 txbdp++;
1294 }
1295
1296 /* Set the last descriptor in the ring to indicate wrap */
1297 txbdp--;
1298 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1299 TXBD_WRAP);
1300 }
1301
1302 rfbptr = &regs->rfbptr0;
1303 for (i = 0; i < priv->num_rx_queues; i++) {
1304 rx_queue = priv->rx_queue[i];
1305
1306 rx_queue->next_to_clean = 0;
1307 rx_queue->next_to_use = 0;
1308 rx_queue->next_to_alloc = 0;
1309
1310 /* make sure next_to_clean != next_to_use after this
1311 * by leaving at least 1 unused descriptor
1312 */
1313 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1314
1315 rx_queue->rfbptr = rfbptr;
1316 rfbptr += 2;
1317 }
1318 }
1319
1320 static int gfar_alloc_skb_resources(struct net_device *ndev)
1321 {
1322 void *vaddr;
1323 dma_addr_t addr;
1324 int i, j;
1325 struct gfar_private *priv = netdev_priv(ndev);
1326 struct device *dev = priv->dev;
1327 struct gfar_priv_tx_q *tx_queue = NULL;
1328 struct gfar_priv_rx_q *rx_queue = NULL;
1329
1330 priv->total_tx_ring_size = 0;
1331 for (i = 0; i < priv->num_tx_queues; i++)
1332 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1333
1334 priv->total_rx_ring_size = 0;
1335 for (i = 0; i < priv->num_rx_queues; i++)
1336 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1337
1338 /* Allocate memory for the buffer descriptors */
1339 vaddr = dma_alloc_coherent(dev,
1340 (priv->total_tx_ring_size *
1341 sizeof(struct txbd8)) +
1342 (priv->total_rx_ring_size *
1343 sizeof(struct rxbd8)),
1344 &addr, GFP_KERNEL);
1345 if (!vaddr)
1346 return -ENOMEM;
1347
1348 for (i = 0; i < priv->num_tx_queues; i++) {
1349 tx_queue = priv->tx_queue[i];
1350 tx_queue->tx_bd_base = vaddr;
1351 tx_queue->tx_bd_dma_base = addr;
1352 tx_queue->dev = ndev;
1353 /* enet DMA only understands physical addresses */
1354 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1355 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1356 }
1357
1358 /* Start the rx descriptor ring where the tx ring leaves off */
1359 for (i = 0; i < priv->num_rx_queues; i++) {
1360 rx_queue = priv->rx_queue[i];
1361 rx_queue->rx_bd_base = vaddr;
1362 rx_queue->rx_bd_dma_base = addr;
1363 rx_queue->ndev = ndev;
1364 rx_queue->dev = dev;
1365 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1366 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1367 }
1368
1369 /* Setup the skbuff rings */
1370 for (i = 0; i < priv->num_tx_queues; i++) {
1371 tx_queue = priv->tx_queue[i];
1372 tx_queue->tx_skbuff =
1373 kmalloc_array(tx_queue->tx_ring_size,
1374 sizeof(*tx_queue->tx_skbuff),
1375 GFP_KERNEL);
1376 if (!tx_queue->tx_skbuff)
1377 goto cleanup;
1378
1379 for (j = 0; j < tx_queue->tx_ring_size; j++)
1380 tx_queue->tx_skbuff[j] = NULL;
1381 }
1382
1383 for (i = 0; i < priv->num_rx_queues; i++) {
1384 rx_queue = priv->rx_queue[i];
1385 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1386 sizeof(*rx_queue->rx_buff),
1387 GFP_KERNEL);
1388 if (!rx_queue->rx_buff)
1389 goto cleanup;
1390 }
1391
1392 gfar_init_bds(ndev);
1393
1394 return 0;
1395
1396 cleanup:
1397 free_skb_resources(priv);
1398 return -ENOMEM;
1399 }
1400
1401 /* Bring the controller up and running */
1402 int startup_gfar(struct net_device *ndev)
1403 {
1404 struct gfar_private *priv = netdev_priv(ndev);
1405 int err;
1406
1407 gfar_mac_reset(priv);
1408
1409 err = gfar_alloc_skb_resources(ndev);
1410 if (err)
1411 return err;
1412
1413 gfar_init_tx_rx_base(priv);
1414
1415 smp_mb__before_atomic();
1416 clear_bit(GFAR_DOWN, &priv->state);
1417 smp_mb__after_atomic();
1418
1419 /* Start Rx/Tx DMA and enable the interrupts */
1420 gfar_start(priv);
1421
1422 /* force link state update after mac reset */
1423 priv->oldlink = 0;
1424 priv->oldspeed = 0;
1425 priv->oldduplex = -1;
1426
1427 phy_start(ndev->phydev);
1428
1429 enable_napi(priv);
1430
1431 netif_tx_wake_all_queues(ndev);
1432
1433 return 0;
1434 }
1435
1436 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1437 {
1438 struct net_device *ndev = priv->ndev;
1439 struct phy_device *phydev = ndev->phydev;
1440 u32 val = 0;
1441
1442 if (!phydev->duplex)
1443 return val;
1444
1445 if (!priv->pause_aneg_en) {
1446 if (priv->tx_pause_en)
1447 val |= MACCFG1_TX_FLOW;
1448 if (priv->rx_pause_en)
1449 val |= MACCFG1_RX_FLOW;
1450 } else {
1451 u16 lcl_adv, rmt_adv;
1452 u8 flowctrl;
1453 /* get link partner capabilities */
1454 rmt_adv = 0;
1455 if (phydev->pause)
1456 rmt_adv = LPA_PAUSE_CAP;
1457 if (phydev->asym_pause)
1458 rmt_adv |= LPA_PAUSE_ASYM;
1459
1460 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1461 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1462 if (flowctrl & FLOW_CTRL_TX)
1463 val |= MACCFG1_TX_FLOW;
1464 if (flowctrl & FLOW_CTRL_RX)
1465 val |= MACCFG1_RX_FLOW;
1466 }
1467
1468 return val;
1469 }
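/* Illustrative example: on a full-duplex link with pause autonegotiation
 * enabled, if the local side advertises Pause + AsymPause and the link
 * partner advertises symmetric Pause (phydev->pause set, asym_pause clear),
 * mii_resolve_flowctrl_fdx() resolves to FLOW_CTRL_TX | FLOW_CTRL_RX, so
 * gfar_get_flowctrl_cfg() returns MACCFG1_TX_FLOW | MACCFG1_RX_FLOW.
 */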
1470
1471 static noinline void gfar_update_link_state(struct gfar_private *priv)
1472 {
1473 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1474 struct net_device *ndev = priv->ndev;
1475 struct phy_device *phydev = ndev->phydev;
1476 struct gfar_priv_rx_q *rx_queue = NULL;
1477 int i;
1478
1479 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1480 return;
1481
1482 if (phydev->link) {
1483 u32 tempval1 = gfar_read(&regs->maccfg1);
1484 u32 tempval = gfar_read(&regs->maccfg2);
1485 u32 ecntrl = gfar_read(&regs->ecntrl);
1486 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1487
1488 if (phydev->duplex != priv->oldduplex) {
1489 if (!(phydev->duplex))
1490 tempval &= ~(MACCFG2_FULL_DUPLEX);
1491 else
1492 tempval |= MACCFG2_FULL_DUPLEX;
1493
1494 priv->oldduplex = phydev->duplex;
1495 }
1496
1497 if (phydev->speed != priv->oldspeed) {
1498 switch (phydev->speed) {
1499 case 1000:
1500 tempval =
1501 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1502
1503 ecntrl &= ~(ECNTRL_R100);
1504 break;
1505 case 100:
1506 case 10:
1507 tempval =
1508 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1509
1510 /* Reduced mode distinguishes
1511 * between 10 and 100
1512 */
1513 if (phydev->speed == SPEED_100)
1514 ecntrl |= ECNTRL_R100;
1515 else
1516 ecntrl &= ~(ECNTRL_R100);
1517 break;
1518 default:
1519 netif_warn(priv, link, priv->ndev,
1520 "Ack! Speed (%d) is not 10/100/1000!\n",
1521 phydev->speed);
1522 break;
1523 }
1524
1525 priv->oldspeed = phydev->speed;
1526 }
1527
1528 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1529 tempval1 |= gfar_get_flowctrl_cfg(priv);
1530
1531 /* Turn last free buffer recording on */
1532 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1533 for (i = 0; i < priv->num_rx_queues; i++) {
1534 u32 bdp_dma;
1535
1536 rx_queue = priv->rx_queue[i];
1537 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1538 gfar_write(rx_queue->rfbptr, bdp_dma);
1539 }
1540
1541 priv->tx_actual_en = 1;
1542 }
1543
1544 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1545 priv->tx_actual_en = 0;
1546
1547 gfar_write(&regs->maccfg1, tempval1);
1548 gfar_write(&regs->maccfg2, tempval);
1549 gfar_write(&regs->ecntrl, ecntrl);
1550
1551 if (!priv->oldlink)
1552 priv->oldlink = 1;
1553
1554 } else if (priv->oldlink) {
1555 priv->oldlink = 0;
1556 priv->oldspeed = 0;
1557 priv->oldduplex = -1;
1558 }
1559
1560 if (netif_msg_link(priv))
1561 phy_print_status(phydev);
1562 }
1563
1564 /* Called every time the controller might need to be made
1565 * aware of new link state. The PHY code conveys this
1566 * information through variables in the phydev structure, and this
1567 * function converts those variables into the appropriate
1568 * register values, and can bring down the device if needed.
1569 */
1570 static void adjust_link(struct net_device *dev)
1571 {
1572 struct gfar_private *priv = netdev_priv(dev);
1573 struct phy_device *phydev = dev->phydev;
1574
1575 if (unlikely(phydev->link != priv->oldlink ||
1576 (phydev->link && (phydev->duplex != priv->oldduplex ||
1577 phydev->speed != priv->oldspeed))))
1578 gfar_update_link_state(priv);
1579 }
1580
1581 /* Initialize TBI PHY interface for communicating with the
1582 * SERDES lynx PHY on the chip. We communicate with this PHY
1583 * through the MDIO bus on each controller, treating it as a
1584 * "normal" PHY at the address found in the TBIPA register. We assume
1585 * that the TBIPA register is valid. Either the MDIO bus code will set
1586 * it to a value that doesn't conflict with other PHYs on the bus, or the
1587 * value doesn't matter, as there are no other PHYs on the bus.
1588 */
1589 static void gfar_configure_serdes(struct net_device *dev)
1590 {
1591 struct gfar_private *priv = netdev_priv(dev);
1592 struct phy_device *tbiphy;
1593
1594 if (!priv->tbi_node) {
1595 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1596 "device tree specify a tbi-handle\n");
1597 return;
1598 }
1599
1600 tbiphy = of_phy_find_device(priv->tbi_node);
1601 if (!tbiphy) {
1602 dev_err(&dev->dev, "error: Could not get TBI device\n");
1603 return;
1604 }
1605
1606 /* If the link is already up, we must already be ok, and don't need to
1607 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1608 * everything for us? Resetting it takes the link down and requires
1609 * several seconds for it to come back.
1610 */
1611 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1612 put_device(&tbiphy->mdio.dev);
1613 return;
1614 }
1615
1616 /* Single clk mode, mii mode off (for serdes communication) */
1617 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1618
1619 phy_write(tbiphy, MII_ADVERTISE,
1620 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1621 ADVERTISE_1000XPSE_ASYM);
1622
1623 phy_write(tbiphy, MII_BMCR,
1624 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1625 BMCR_SPEED1000);
1626
1627 put_device(&tbiphy->mdio.dev);
1628 }
1629
1630 /* Initializes driver's PHY state, and attaches to the PHY.
1631 * Returns 0 on success.
1632 */
1633 static int init_phy(struct net_device *dev)
1634 {
1635 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1636 struct gfar_private *priv = netdev_priv(dev);
1637 phy_interface_t interface = priv->interface;
1638 struct phy_device *phydev;
1639 struct ethtool_eee edata;
1640
1641 linkmode_set_bit_array(phy_10_100_features_array,
1642 ARRAY_SIZE(phy_10_100_features_array),
1643 mask);
1644 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1645 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1646 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1647 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1648
1649 priv->oldlink = 0;
1650 priv->oldspeed = 0;
1651 priv->oldduplex = -1;
1652
1653 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1654 interface);
1655 if (!phydev) {
1656 dev_err(&dev->dev, "could not attach to PHY\n");
1657 return -ENODEV;
1658 }
1659
1660 if (interface == PHY_INTERFACE_MODE_SGMII)
1661 gfar_configure_serdes(dev);
1662
1663 /* Remove any features not supported by the controller */
1664 linkmode_and(phydev->supported, phydev->supported, mask);
1665 linkmode_copy(phydev->advertising, phydev->supported);
1666
1667 /* Add support for flow control */
1668 phy_support_asym_pause(phydev);
1669
1670 /* disable EEE autoneg, EEE not supported by eTSEC */
1671 memset(&edata, 0, sizeof(struct ethtool_eee));
1672 phy_ethtool_set_eee(phydev, &edata);
1673
1674 return 0;
1675 }
1676
1677 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1678 {
1679 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1680
1681 memset(fcb, 0, GMAC_FCB_LEN);
1682
1683 return fcb;
1684 }
1685
1686 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1687 int fcb_length)
1688 {
1689 /* If we're here, it's an IP packet with a TCP or UDP
1690 * payload. We set it to checksum, using a pseudo-header
1691 * we provide
1692 */
1693 u8 flags = TXFCB_DEFAULT;
1694
1695 /* Tell the controller what the protocol is
1696 * And provide the already calculated phcs
1697 */
1698 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1699 flags |= TXFCB_UDP;
1700 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1701 } else
1702 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1703
1704 /* l3os is the distance between the start of the
1705 * frame (skb->data) and the start of the IP hdr.
1706 * l4os is the distance between the start of the
1707 * l3 hdr and the l4 hdr
1708 */
1709 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1710 fcb->l4os = skb_network_header_len(skb);
1711
1712 fcb->flags = flags;
1713 }
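/* Illustrative example: for an untagged, non-timestamped IPv4/TCP frame,
 * fcb_length is GMAC_FCB_LEN (8 bytes). After gfar_add_fcb() the FCB
 * precedes the 14-byte Ethernet header, so skb_network_offset() is 22 and
 * l3os = 22 - 8 = 14, while l4os is the IP header length (20 for a header
 * without options).
 */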
1714
1715 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1716 {
1717 fcb->flags |= TXFCB_VLN;
1718 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1719 }
1720
1721 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1722 struct txbd8 *base, int ring_size)
1723 {
1724 struct txbd8 *new_bd = bdp + stride;
1725
1726 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1727 }
1728
1729 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1730 int ring_size)
1731 {
1732 return skip_txbd(bdp, 1, base, ring_size);
1733 }
1734
1735 /* eTSEC12: csum generation not supported for some fcb offsets */
1736 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1737 unsigned long fcb_addr)
1738 {
1739 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1740 (fcb_addr % 0x20) > 0x18);
1741 }
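/* Illustrative example: an FCB that starts at byte 0x1c of a 32-byte
 * region (fcb_addr % 0x20 == 0x1c, which is > 0x18) makes this return
 * true, and gfar_start_xmit() then falls back to skb_checksum_help().
 */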
1742
1743 /* eTSEC76: csum generation for frames larger than 2500 may
1744 * cause excess delays before start of transmission
1745 */
1746 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1747 unsigned int len)
1748 {
1749 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1750 (len > 2500));
1751 }
1752
1753 /* This is called by the kernel when a frame is ready for transmission.
1754 * It is pointed to by the dev->hard_start_xmit function pointer
1755 */
1756 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1757 {
1758 struct gfar_private *priv = netdev_priv(dev);
1759 struct gfar_priv_tx_q *tx_queue = NULL;
1760 struct netdev_queue *txq;
1761 struct gfar __iomem *regs = NULL;
1762 struct txfcb *fcb = NULL;
1763 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1764 u32 lstatus;
1765 skb_frag_t *frag;
1766 int i, rq = 0;
1767 int do_tstamp, do_csum, do_vlan;
1768 u32 bufaddr;
1769 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1770
1771 rq = skb->queue_mapping;
1772 tx_queue = priv->tx_queue[rq];
1773 txq = netdev_get_tx_queue(dev, rq);
1774 base = tx_queue->tx_bd_base;
1775 regs = tx_queue->grp->regs;
1776
1777 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1778 do_vlan = skb_vlan_tag_present(skb);
1779 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1780 priv->hwts_tx_en;
1781
1782 if (do_csum || do_vlan)
1783 fcb_len = GMAC_FCB_LEN;
1784
1785 /* check if time stamp should be generated */
1786 if (unlikely(do_tstamp))
1787 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1788
1789 /* make space for additional header when fcb is needed */
1790 if (fcb_len) {
1791 if (unlikely(skb_cow_head(skb, fcb_len))) {
1792 dev->stats.tx_errors++;
1793 dev_kfree_skb_any(skb);
1794 return NETDEV_TX_OK;
1795 }
1796 }
1797
1798 /* total number of fragments in the SKB */
1799 nr_frags = skb_shinfo(skb)->nr_frags;
1800
1801 /* calculate the required number of TxBDs for this skb */
1802 if (unlikely(do_tstamp))
1803 nr_txbds = nr_frags + 2;
1804 else
1805 nr_txbds = nr_frags + 1;
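/* e.g. a 3-fragment skb needs 4 TxBDs (head + 3 frags), or 5 with
 * HW timestamping, since the head is then split across two BDs
 * (FCB part and data part, see below)
 */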
1806
1807 /* check if there is space to queue this packet */
1808 if (nr_txbds > tx_queue->num_txbdfree) {
1809 /* no space, stop the queue */
1810 netif_tx_stop_queue(txq);
1811 dev->stats.tx_fifo_errors++;
1812 return NETDEV_TX_BUSY;
1813 }
1814
1815 /* Update transmit stats */
1816 bytes_sent = skb->len;
1817 tx_queue->stats.tx_bytes += bytes_sent;
1818 /* keep Tx bytes on wire for BQL accounting */
1819 GFAR_CB(skb)->bytes_sent = bytes_sent;
1820 tx_queue->stats.tx_packets++;
1821
1822 txbdp = txbdp_start = tx_queue->cur_tx;
1823 lstatus = be32_to_cpu(txbdp->lstatus);
1824
1825 /* Add TxPAL between FCB and frame if required */
1826 if (unlikely(do_tstamp)) {
1827 skb_push(skb, GMAC_TXPAL_LEN);
1828 memset(skb->data, 0, GMAC_TXPAL_LEN);
1829 }
1830
1831 /* Add TxFCB if required */
1832 if (fcb_len) {
1833 fcb = gfar_add_fcb(skb);
1834 lstatus |= BD_LFLAG(TXBD_TOE);
1835 }
1836
1837 /* Set up checksumming */
1838 if (do_csum) {
1839 gfar_tx_checksum(skb, fcb, fcb_len);
1840
1841 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1842 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1843 __skb_pull(skb, GMAC_FCB_LEN);
1844 skb_checksum_help(skb);
1845 if (do_vlan || do_tstamp) {
1846 /* put back a new fcb for vlan/tstamp TOE */
1847 fcb = gfar_add_fcb(skb);
1848 } else {
1849 /* Tx TOE not used */
1850 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1851 fcb = NULL;
1852 }
1853 }
1854 }
1855
1856 if (do_vlan)
1857 gfar_tx_vlan(skb, fcb);
1858
1859 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1860 DMA_TO_DEVICE);
1861 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1862 goto dma_map_err;
1863
1864 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1865
1866 /* Time stamp insertion requires one additional TxBD */
1867 if (unlikely(do_tstamp))
1868 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1869 tx_queue->tx_ring_size);
1870
1871 if (likely(!nr_frags)) {
1872 if (likely(!do_tstamp))
1873 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1874 } else {
1875 u32 lstatus_start = lstatus;
1876
1877 /* Place the fragment addresses and lengths into the TxBDs */
1878 frag = &skb_shinfo(skb)->frags[0];
1879 for (i = 0; i < nr_frags; i++, frag++) {
1880 unsigned int size;
1881
1882 /* Point at the next BD, wrapping as needed */
1883 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1884
1885 size = skb_frag_size(frag);
1886
1887 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1888 BD_LFLAG(TXBD_READY);
1889
1890 /* Handle the last BD specially */
1891 if (i == nr_frags - 1)
1892 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1893
1894 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1895 size, DMA_TO_DEVICE);
1896 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1897 goto dma_map_err;
1898
1899 /* set the TxBD length and buffer pointer */
1900 txbdp->bufPtr = cpu_to_be32(bufaddr);
1901 txbdp->lstatus = cpu_to_be32(lstatus);
1902 }
1903
1904 lstatus = lstatus_start;
1905 }
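/* The first BD's lstatus is written last (after the barrier below),
 * so the controller never sees a partially built descriptor chain.
 */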
1906
1907 /* If time stamping is requested, one additional TxBD must be set up. The
1908 * first TxBD points to the FCB and must have a data length of
1909 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1910 * the full frame length.
1911 */
1912 if (unlikely(do_tstamp)) {
1913 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1914
1915 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1916 bufaddr += fcb_len;
1917
1918 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1919 (skb_headlen(skb) - fcb_len);
1920 if (!nr_frags)
1921 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1922
1923 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1924 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1925 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1926
1927 /* Setup tx hardware time stamping */
1928 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1929 fcb->ptp = 1;
1930 } else {
1931 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1932 }
1933
1934 netdev_tx_sent_queue(txq, bytes_sent);
1935
1936 gfar_wmb();
1937
1938 txbdp_start->lstatus = cpu_to_be32(lstatus);
1939
1940 gfar_wmb(); /* force lstatus write before tx_skbuff */
1941
1942 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1943
1944 /* Update the current skb pointer to the next entry we will use
1945 * (wrapping if necessary)
1946 */
1947 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1948 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1949
1950 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1951
1952 /* We can work in parallel with gfar_clean_tx_ring(), except
1953 * when modifying num_txbdfree. Note that we didn't grab the lock
1954 * when we read num_txbdfree and checked for available space,
1955 * because outside of this function that count can only grow.
1956 */
1957 spin_lock_bh(&tx_queue->txlock);
1958 /* reduce TxBD free count */
1959 tx_queue->num_txbdfree -= (nr_txbds);
1960 spin_unlock_bh(&tx_queue->txlock);
1961
1962 /* If the next BD still needs to be cleaned up, then the BDs
1963 * are full. We need to tell the kernel to stop sending us packets.
1964 */
1965 if (!tx_queue->num_txbdfree) {
1966 netif_tx_stop_queue(txq);
1967
1968 dev->stats.tx_fifo_errors++;
1969 }
1970
1971 /* Tell the DMA to go go go */
1972 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1973
1974 return NETDEV_TX_OK;
1975
1976 dma_map_err:
1977 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1978 if (do_tstamp)
1979 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1980 for (i = 0; i < nr_frags; i++) {
1981 lstatus = be32_to_cpu(txbdp->lstatus);
1982 if (!(lstatus & BD_LFLAG(TXBD_READY)))
1983 break;
1984
1985 lstatus &= ~BD_LFLAG(TXBD_READY);
1986 txbdp->lstatus = cpu_to_be32(lstatus);
1987 bufaddr = be32_to_cpu(txbdp->bufPtr);
1988 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
1989 DMA_TO_DEVICE);
1990 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1991 }
1992 gfar_wmb();
1993 dev_kfree_skb_any(skb);
1994 return NETDEV_TX_OK;
1995 }
1996
1997 /* Changes the mac address if the controller is not running. */
1998 static int gfar_set_mac_address(struct net_device *dev)
1999 {
2000 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2001
2002 return 0;
2003 }
2004
2005 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2006 {
2007 struct gfar_private *priv = netdev_priv(dev);
2008
2009 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2010 cpu_relax();
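/* GFAR_RESETTING serializes this path against other reset and
 * reconfiguration paths; spin until we own it.
 */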
2011
2012 if (dev->flags & IFF_UP)
2013 stop_gfar(dev);
2014
2015 dev->mtu = new_mtu;
2016
2017 if (dev->flags & IFF_UP)
2018 startup_gfar(dev);
2019
2020 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2021
2022 return 0;
2023 }
2024
2025 static void reset_gfar(struct net_device *ndev)
2026 {
2027 struct gfar_private *priv = netdev_priv(ndev);
2028
2029 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2030 cpu_relax();
2031
2032 stop_gfar(ndev);
2033 startup_gfar(ndev);
2034
2035 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2036 }
2037
2038 /* gfar_reset_task gets scheduled when a packet has not been
2039 * transmitted after a set amount of time.
2040 * For now, assume that clearing out all the structures, and
2041 * starting over will fix the problem.
2042 */
2043 static void gfar_reset_task(struct work_struct *work)
2044 {
2045 struct gfar_private *priv = container_of(work, struct gfar_private,
2046 reset_task);
2047 reset_gfar(priv->ndev);
2048 }
2049
2050 static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2051 {
2052 struct gfar_private *priv = netdev_priv(dev);
2053
2054 dev->stats.tx_errors++;
2055 schedule_work(&priv->reset_task);
2056 }
2057
2058 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2059 {
2060 struct hwtstamp_config config;
2061 struct gfar_private *priv = netdev_priv(netdev);
2062
2063 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2064 return -EFAULT;
2065
2066 /* reserved for future extensions */
2067 if (config.flags)
2068 return -EINVAL;
2069
2070 switch (config.tx_type) {
2071 case HWTSTAMP_TX_OFF:
2072 priv->hwts_tx_en = 0;
2073 break;
2074 case HWTSTAMP_TX_ON:
2075 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2076 return -ERANGE;
2077 priv->hwts_tx_en = 1;
2078 break;
2079 default:
2080 return -ERANGE;
2081 }
2082
2083 switch (config.rx_filter) {
2084 case HWTSTAMP_FILTER_NONE:
2085 if (priv->hwts_rx_en) {
2086 priv->hwts_rx_en = 0;
2087 reset_gfar(netdev);
2088 }
2089 break;
2090 default:
2091 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2092 return -ERANGE;
2093 if (!priv->hwts_rx_en) {
2094 priv->hwts_rx_en = 1;
2095 reset_gfar(netdev);
2096 }
2097 config.rx_filter = HWTSTAMP_FILTER_ALL;
2098 break;
2099 }
2100
2101 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2102 -EFAULT : 0;
2103 }
2104
2105 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2106 {
2107 struct hwtstamp_config config;
2108 struct gfar_private *priv = netdev_priv(netdev);
2109
2110 config.flags = 0;
2111 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2112 config.rx_filter = (priv->hwts_rx_en ?
2113 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2114
2115 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2116 -EFAULT : 0;
2117 }
2118
2119 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2120 {
2121 struct phy_device *phydev = dev->phydev;
2122
2123 if (!netif_running(dev))
2124 return -EINVAL;
2125
2126 if (cmd == SIOCSHWTSTAMP)
2127 return gfar_hwtstamp_set(dev, rq);
2128 if (cmd == SIOCGHWTSTAMP)
2129 return gfar_hwtstamp_get(dev, rq);
2130
2131 if (!phydev)
2132 return -ENODEV;
2133
2134 return phy_mii_ioctl(phydev, rq, cmd);
2135 }
2136
2137 /* Interrupt Handler for Transmit complete */
2138 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2139 {
2140 struct net_device *dev = tx_queue->dev;
2141 struct netdev_queue *txq;
2142 struct gfar_private *priv = netdev_priv(dev);
2143 struct txbd8 *bdp, *next = NULL;
2144 struct txbd8 *lbdp = NULL;
2145 struct txbd8 *base = tx_queue->tx_bd_base;
2146 struct sk_buff *skb;
2147 int skb_dirtytx;
2148 int tx_ring_size = tx_queue->tx_ring_size;
2149 int frags = 0, nr_txbds = 0;
2150 int i;
2151 int howmany = 0;
2152 int tqi = tx_queue->qindex;
2153 unsigned int bytes_sent = 0;
2154 u32 lstatus;
2155 size_t buflen;
2156
2157 txq = netdev_get_tx_queue(dev, tqi);
2158 bdp = tx_queue->dirty_tx;
2159 skb_dirtytx = tx_queue->skb_dirtytx;
2160
2161 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2162 bool do_tstamp;
2163
2164 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2165 priv->hwts_tx_en;
2166
2167 frags = skb_shinfo(skb)->nr_frags;
2168
2169 /* When time stamping, one additional TxBD must be freed.
2170 * Also, we need to dma_unmap_single() the TxPAL.
2171 */
2172 if (unlikely(do_tstamp))
2173 nr_txbds = frags + 2;
2174 else
2175 nr_txbds = frags + 1;
2176
2177 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2178
2179 lstatus = be32_to_cpu(lbdp->lstatus);
2180
2181 /* Only clean completed frames */
2182 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2183 (lstatus & BD_LENGTH_MASK))
2184 break;
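/* TXBD_READY still set with a non-zero length means the controller
 * has not finished this frame yet, so stop cleaning here.
 */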
2185
2186 if (unlikely(do_tstamp)) {
2187 next = next_txbd(bdp, base, tx_ring_size);
2188 buflen = be16_to_cpu(next->length) +
2189 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2190 } else
2191 buflen = be16_to_cpu(bdp->length);
2192
2193 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2194 buflen, DMA_TO_DEVICE);
2195
2196 if (unlikely(do_tstamp)) {
2197 struct skb_shared_hwtstamps shhwtstamps;
2198 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2199 ~0x7UL);
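/* The controller stores the Tx timestamp in the prepended TxPAL
 * area; take an 8-byte-aligned pointer into it.
 */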
2200
2201 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2202 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2203 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2204 skb_tstamp_tx(skb, &shhwtstamps);
2205 gfar_clear_txbd_status(bdp);
2206 bdp = next;
2207 }
2208
2209 gfar_clear_txbd_status(bdp);
2210 bdp = next_txbd(bdp, base, tx_ring_size);
2211
2212 for (i = 0; i < frags; i++) {
2213 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2214 be16_to_cpu(bdp->length),
2215 DMA_TO_DEVICE);
2216 gfar_clear_txbd_status(bdp);
2217 bdp = next_txbd(bdp, base, tx_ring_size);
2218 }
2219
2220 bytes_sent += GFAR_CB(skb)->bytes_sent;
2221
2222 dev_kfree_skb_any(skb);
2223
2224 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2225
2226 skb_dirtytx = (skb_dirtytx + 1) &
2227 TX_RING_MOD_MASK(tx_ring_size);
2228
2229 howmany++;
2230 spin_lock(&tx_queue->txlock);
2231 tx_queue->num_txbdfree += nr_txbds;
2232 spin_unlock(&tx_queue->txlock);
2233 }
2234
2235 /* If we freed a buffer, we can restart transmission, if necessary */
2236 if (tx_queue->num_txbdfree &&
2237 netif_tx_queue_stopped(txq) &&
2238 !(test_bit(GFAR_DOWN, &priv->state)))
2239 netif_wake_subqueue(priv->ndev, tqi);
2240
2241 /* Update dirty indicators */
2242 tx_queue->skb_dirtytx = skb_dirtytx;
2243 tx_queue->dirty_tx = bdp;
2244
2245 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2246 }
2247
2248 static void count_errors(u32 lstatus, struct net_device *ndev)
2249 {
2250 struct gfar_private *priv = netdev_priv(ndev);
2251 struct net_device_stats *stats = &ndev->stats;
2252 struct gfar_extra_stats *estats = &priv->extra_stats;
2253
2254 /* If the packet was truncated, none of the other errors matter */
2255 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2256 stats->rx_length_errors++;
2257
2258 atomic64_inc(&estats->rx_trunc);
2259
2260 return;
2261 }
2262 /* Count the errors, if there were any */
2263 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2264 stats->rx_length_errors++;
2265
2266 if (lstatus & BD_LFLAG(RXBD_LARGE))
2267 atomic64_inc(&estats->rx_large);
2268 else
2269 atomic64_inc(&estats->rx_short);
2270 }
2271 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2272 stats->rx_frame_errors++;
2273 atomic64_inc(&estats->rx_nonoctet);
2274 }
2275 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2276 atomic64_inc(&estats->rx_crcerr);
2277 stats->rx_crc_errors++;
2278 }
2279 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2280 atomic64_inc(&estats->rx_overrun);
2281 stats->rx_over_errors++;
2282 }
2283 }
2284
2285 static irqreturn_t gfar_receive(int irq, void *grp_id)
2286 {
2287 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2288 unsigned long flags;
2289 u32 imask, ievent;
2290
2291 ievent = gfar_read(&grp->regs->ievent);
2292
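/* FGPI is the filer general purpose interrupt used by the
 * wake-on-filer rules (see gfar_filer_config_wol()); just
 * acknowledge it here.
 */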
2293 if (unlikely(ievent & IEVENT_FGPI)) {
2294 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2295 return IRQ_HANDLED;
2296 }
2297
2298 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2299 spin_lock_irqsave(&grp->grplock, flags);
2300 imask = gfar_read(&grp->regs->imask);
2301 imask &= IMASK_RX_DISABLED;
2302 gfar_write(&grp->regs->imask, imask);
2303 spin_unlock_irqrestore(&grp->grplock, flags);
2304 __napi_schedule(&grp->napi_rx);
2305 } else {
2306 /* Clear IEVENT, so interrupts aren't called again
2307 * because of the packets that have already arrived.
2308 */
2309 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2310 }
2311
2312 return IRQ_HANDLED;
2313 }
2314
2315 /* Interrupt Handler for Transmit complete */
2316 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2317 {
2318 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2319 unsigned long flags;
2320 u32 imask;
2321
2322 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2323 spin_lock_irqsave(&grp->grplock, flags);
2324 imask = gfar_read(&grp->regs->imask);
2325 imask &= IMASK_TX_DISABLED;
2326 gfar_write(&grp->regs->imask, imask);
2327 spin_unlock_irqrestore(&grp->grplock, flags);
2328 __napi_schedule(&grp->napi_tx);
2329 } else {
2330 /* Clear IEVENT, so interrupts aren't called again
2331 * because of the packets that have already arrived.
2332 */
2333 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2334 }
2335
2336 return IRQ_HANDLED;
2337 }
2338
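/* Attach one Rx buffer to the skb: the first buffer of a frame becomes
 * the linear area, later buffers are added as page fragments. Returns
 * true if the half-page buffer can be recycled for another descriptor.
 */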
2339 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2340 struct sk_buff *skb, bool first)
2341 {
2342 int size = lstatus & BD_LENGTH_MASK;
2343 struct page *page = rxb->page;
2344
2345 if (likely(first)) {
2346 skb_put(skb, size);
2347 } else {
2348 /* the last fragment's length contains the full frame length */
2349 if (lstatus & BD_LFLAG(RXBD_LAST))
2350 size -= skb->len;
2351
2352 WARN(size < 0, "gianfar: rx fragment size underflow");
2353 if (size < 0)
2354 return false;
2355
2356 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2357 rxb->page_offset + RXBUF_ALIGNMENT,
2358 size, GFAR_RXB_TRUESIZE);
2359 }
2360
2361 /* try reuse page */
2362 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2363 return false;
2364
2365 /* change offset to the other half */
2366 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2367
2368 page_ref_inc(page);
2369
2370 return true;
2371 }
2372
2373 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2374 struct gfar_rx_buff *old_rxb)
2375 {
2376 struct gfar_rx_buff *new_rxb;
2377 u16 nta = rxq->next_to_alloc;
2378
2379 new_rxb = &rxq->rx_buff[nta];
2380
2381 /* find next buf that can reuse a page */
2382 nta++;
2383 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2384
2385 /* copy page reference */
2386 *new_rxb = *old_rxb;
2387
2388 /* sync for use by the device */
2389 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2390 old_rxb->page_offset,
2391 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2392 }
2393
2394 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2395 u32 lstatus, struct sk_buff *skb)
2396 {
2397 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2398 struct page *page = rxb->page;
2399 bool first = false;
2400
2401 if (likely(!skb)) {
2402 void *buff_addr = page_address(page) + rxb->page_offset;
2403
2404 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2405 if (unlikely(!skb)) {
2406 gfar_rx_alloc_err(rx_queue);
2407 return NULL;
2408 }
2409 skb_reserve(skb, RXBUF_ALIGNMENT);
2410 first = true;
2411 }
2412
2413 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2414 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2415
2416 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2417 /* reuse the free half of the page */
2418 gfar_reuse_rx_page(rx_queue, rxb);
2419 } else {
2420 /* page cannot be reused, unmap it */
2421 dma_unmap_page(rx_queue->dev, rxb->dma,
2422 PAGE_SIZE, DMA_FROM_DEVICE);
2423 }
2424
2425 /* clear rxb content */
2426 rxb->page = NULL;
2427
2428 return skb;
2429 }
2430
2431 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2432 {
2433 /* If valid headers were found, and valid sums
2434 * were verified, then we tell the kernel that no
2435 * checksumming is necessary. Otherwise, the checksum is left for the stack to verify.
2436 */
2437 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2438 (RXFCB_CIP | RXFCB_CTU))
2439 skb->ip_summed = CHECKSUM_UNNECESSARY;
2440 else
2441 skb_checksum_none_assert(skb);
2442 }
2443
2444 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2445 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2446 {
2447 struct gfar_private *priv = netdev_priv(ndev);
2448 struct rxfcb *fcb = NULL;
2449
2450 /* fcb is at the beginning if exists */
2451 fcb = (struct rxfcb *)skb->data;
2452
2453 /* Remove the FCB from the skb
2454 * Remove the padded bytes, if there are any
2455 */
2456 if (priv->uses_rxfcb)
2457 skb_pull(skb, GMAC_FCB_LEN);
2458
2459 /* Get receive timestamp from the skb */
2460 if (priv->hwts_rx_en) {
2461 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2462 u64 *ns = (u64 *) skb->data;
2463
2464 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2465 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2466 }
2467
2468 if (priv->padding)
2469 skb_pull(skb, priv->padding);
2470
2471 /* Trim off the FCS */
2472 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2473
2474 if (ndev->features & NETIF_F_RXCSUM)
2475 gfar_rx_checksum(skb, fcb);
2476
2477 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2478 * Even if vlan rx accel is disabled, on some chips
2479 * RXFCB_VLN is pseudo randomly set.
2480 */
2481 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2482 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2483 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2484 be16_to_cpu(fcb->vlctl));
2485 }
2486
2487 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2488 * until the budget/quota has been reached. Returns the number
2489 * of frames handled
2490 */
2491 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2492 int rx_work_limit)
2493 {
2494 struct net_device *ndev = rx_queue->ndev;
2495 struct gfar_private *priv = netdev_priv(ndev);
2496 struct rxbd8 *bdp;
2497 int i, howmany = 0;
2498 struct sk_buff *skb = rx_queue->skb;
2499 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2500 unsigned int total_bytes = 0, total_pkts = 0;
2501
2502 /* Get the first full descriptor */
2503 i = rx_queue->next_to_clean;
2504
2505 while (rx_work_limit--) {
2506 u32 lstatus;
2507
2508 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2509 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2510 cleaned_cnt = 0;
2511 }
2512
2513 bdp = &rx_queue->rx_bd_base[i];
2514 lstatus = be32_to_cpu(bdp->lstatus);
2515 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2516 break;
2517
2518 /* lost RXBD_LAST descriptor due to overrun */
2519 if (skb &&
2520 (lstatus & BD_LFLAG(RXBD_FIRST))) {
2521 /* discard faulty buffer */
2522 dev_kfree_skb(skb);
2523 skb = NULL;
2524 rx_queue->stats.rx_dropped++;
2525
2526 /* can continue normally */
2527 }
2528
2529 /* order rx buffer descriptor reads */
2530 rmb();
2531
2532 /* fetch next to clean buffer from the ring */
2533 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2534 if (unlikely(!skb))
2535 break;
2536
2537 cleaned_cnt++;
2538 howmany++;
2539
2540 if (unlikely(++i == rx_queue->rx_ring_size))
2541 i = 0;
2542
2543 rx_queue->next_to_clean = i;
2544
2545 /* fetch next buffer if not the last in frame */
2546 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2547 continue;
2548
2549 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2550 count_errors(lstatus, ndev);
2551
2552 /* discard faulty buffer */
2553 dev_kfree_skb(skb);
2554 skb = NULL;
2555 rx_queue->stats.rx_dropped++;
2556 continue;
2557 }
2558
2559 gfar_process_frame(ndev, skb);
2560
2561 /* Increment the number of packets */
2562 total_pkts++;
2563 total_bytes += skb->len;
2564
2565 skb_record_rx_queue(skb, rx_queue->qindex);
2566
2567 skb->protocol = eth_type_trans(skb, ndev);
2568
2569 /* Send the packet up the stack */
2570 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2571
2572 skb = NULL;
2573 }
2574
2575 /* Store incomplete frames for completion */
2576 rx_queue->skb = skb;
2577
2578 rx_queue->stats.rx_packets += total_pkts;
2579 rx_queue->stats.rx_bytes += total_bytes;
2580
2581 if (cleaned_cnt)
2582 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2583
2584 /* Update Last Free RxBD pointer for LFC */
2585 if (unlikely(priv->tx_actual_en)) {
2586 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2587
2588 gfar_write(rx_queue->rfbptr, bdp_dma);
2589 }
2590
2591 return howmany;
2592 }
2593
2594 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2595 {
2596 struct gfar_priv_grp *gfargrp =
2597 container_of(napi, struct gfar_priv_grp, napi_rx);
2598 struct gfar __iomem *regs = gfargrp->regs;
2599 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2600 int work_done = 0;
2601
2602 /* Clear IEVENT, so interrupts aren't called again
2603 * because of the packets that have already arrived
2604 */
2605 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2606
2607 work_done = gfar_clean_rx_ring(rx_queue, budget);
2608
2609 if (work_done < budget) {
2610 u32 imask;
2611 napi_complete_done(napi, work_done);
2612 /* Clear the halt bit in RSTAT */
2613 gfar_write(&regs->rstat, gfargrp->rstat);
2614
2615 spin_lock_irq(&gfargrp->grplock);
2616 imask = gfar_read(&regs->imask);
2617 imask |= IMASK_RX_DEFAULT;
2618 gfar_write(&regs->imask, imask);
2619 spin_unlock_irq(&gfargrp->grplock);
2620 }
2621
2622 return work_done;
2623 }
2624
2625 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2626 {
2627 struct gfar_priv_grp *gfargrp =
2628 container_of(napi, struct gfar_priv_grp, napi_tx);
2629 struct gfar __iomem *regs = gfargrp->regs;
2630 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2631 u32 imask;
2632
2633 /* Clear IEVENT, so interrupts aren't called again
2634 * because of the packets that have already arrived
2635 */
2636 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2637
2638 /* run Tx cleanup to completion */
2639 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2640 gfar_clean_tx_ring(tx_queue);
2641
2642 napi_complete(napi);
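/* Tx cleanup is not budgeted: report zero work so NAPI does not
 * reschedule, then re-enable the Tx interrupts below.
 */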
2643
2644 spin_lock_irq(&gfargrp->grplock);
2645 imask = gfar_read(&regs->imask);
2646 imask |= IMASK_TX_DEFAULT;
2647 gfar_write(&regs->imask, imask);
2648 spin_unlock_irq(&gfargrp->grplock);
2649
2650 return 0;
2651 }
2652
2653 /* GFAR error interrupt handler */
2654 static irqreturn_t gfar_error(int irq, void *grp_id)
2655 {
2656 struct gfar_priv_grp *gfargrp = grp_id;
2657 struct gfar __iomem *regs = gfargrp->regs;
2658 struct gfar_private *priv = gfargrp->priv;
2659 struct net_device *dev = priv->ndev;
2660
2661 /* Save ievent for future reference */
2662 u32 events = gfar_read(&regs->ievent);
2663 
2664 /* Clear IEVENT */
2665 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2666
2667 /* Magic Packet is not an error. */
2668 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2669 (events & IEVENT_MAG))
2670 events &= ~IEVENT_MAG;
2671
2672 /* Log the error condition if Rx/Tx error messages are enabled */
2673 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2674 netdev_dbg(dev,
2675 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2676 events, gfar_read(&regs->imask));
2677
2678 /* Update the error counters */
2679 if (events & IEVENT_TXE) {
2680 dev->stats.tx_errors++;
2681
2682 if (events & IEVENT_LC)
2683 dev->stats.tx_window_errors++;
2684 if (events & IEVENT_CRL)
2685 dev->stats.tx_aborted_errors++;
2686 if (events & IEVENT_XFUN) {
2687 netif_dbg(priv, tx_err, dev,
2688 "TX FIFO underrun, packet dropped\n");
2689 dev->stats.tx_dropped++;
2690 atomic64_inc(&priv->extra_stats.tx_underrun);
2691
2692 schedule_work(&priv->reset_task);
2693 }
2694 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2695 }
2696 if (events & IEVENT_BSY) {
2697 dev->stats.rx_over_errors++;
2698 atomic64_inc(&priv->extra_stats.rx_bsy);
2699
2700 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2701 gfar_read(&regs->rstat));
2702 }
2703 if (events & IEVENT_BABR) {
2704 dev->stats.rx_errors++;
2705 atomic64_inc(&priv->extra_stats.rx_babr);
2706
2707 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2708 }
2709 if (events & IEVENT_EBERR) {
2710 atomic64_inc(&priv->extra_stats.eberr);
2711 netif_dbg(priv, rx_err, dev, "bus error\n");
2712 }
2713 if (events & IEVENT_RXC)
2714 netif_dbg(priv, rx_status, dev, "control frame\n");
2715
2716 if (events & IEVENT_BABT) {
2717 atomic64_inc(&priv->extra_stats.tx_babt);
2718 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2719 }
2720 return IRQ_HANDLED;
2721 }
2722
2723 /* The interrupt handler for devices with one interrupt */
2724 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2725 {
2726 struct gfar_priv_grp *gfargrp = grp_id;
2727
2728 /* Save ievent for future reference */
2729 u32 events = gfar_read(&gfargrp->regs->ievent);
2730
2731 /* Check for reception */
2732 if (events & IEVENT_RX_MASK)
2733 gfar_receive(irq, grp_id);
2734
2735 /* Check for transmit completion */
2736 if (events & IEVENT_TX_MASK)
2737 gfar_transmit(irq, grp_id);
2738
2739 /* Check for errors */
2740 if (events & IEVENT_ERR_MASK)
2741 gfar_error(irq, grp_id);
2742
2743 return IRQ_HANDLED;
2744 }
2745
2746 #ifdef CONFIG_NET_POLL_CONTROLLER
2747 /* Polling 'interrupt' - used by things like netconsole to send skbs
2748 * without having to re-enable interrupts. It's not called while
2749 * the interrupt routine is executing.
2750 */
2751 static void gfar_netpoll(struct net_device *dev)
2752 {
2753 struct gfar_private *priv = netdev_priv(dev);
2754 int i;
2755
2756 /* If the device has multiple interrupts, run tx/rx */
2757 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2758 for (i = 0; i < priv->num_grps; i++) {
2759 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2760
2761 disable_irq(gfar_irq(grp, TX)->irq);
2762 disable_irq(gfar_irq(grp, RX)->irq);
2763 disable_irq(gfar_irq(grp, ER)->irq);
2764 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2765 enable_irq(gfar_irq(grp, ER)->irq);
2766 enable_irq(gfar_irq(grp, RX)->irq);
2767 enable_irq(gfar_irq(grp, TX)->irq);
2768 }
2769 } else {
2770 for (i = 0; i < priv->num_grps; i++) {
2771 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2772
2773 disable_irq(gfar_irq(grp, TX)->irq);
2774 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2775 enable_irq(gfar_irq(grp, TX)->irq);
2776 }
2777 }
2778 }
2779 #endif
2780
2781 static void free_grp_irqs(struct gfar_priv_grp *grp)
2782 {
2783 free_irq(gfar_irq(grp, TX)->irq, grp);
2784 free_irq(gfar_irq(grp, RX)->irq, grp);
2785 free_irq(gfar_irq(grp, ER)->irq, grp);
2786 }
2787
2788 static int register_grp_irqs(struct gfar_priv_grp *grp)
2789 {
2790 struct gfar_private *priv = grp->priv;
2791 struct net_device *dev = priv->ndev;
2792 int err;
2793
2794 /* If the device has multiple interrupts, register for
2795 * them. Otherwise, only register for the one interrupt.
2796 */
2797 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2798 /* Install our interrupt handlers for Error,
2799 * Transmit, and Receive
2800 */
2801 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2802 gfar_irq(grp, ER)->name, grp);
2803 if (err < 0) {
2804 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2805 gfar_irq(grp, ER)->irq);
2806
2807 goto err_irq_fail;
2808 }
2809 enable_irq_wake(gfar_irq(grp, ER)->irq);
2810
2811 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2812 gfar_irq(grp, TX)->name, grp);
2813 if (err < 0) {
2814 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2815 gfar_irq(grp, TX)->irq);
2816 goto tx_irq_fail;
2817 }
2818 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2819 gfar_irq(grp, RX)->name, grp);
2820 if (err < 0) {
2821 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2822 gfar_irq(grp, RX)->irq);
2823 goto rx_irq_fail;
2824 }
2825 enable_irq_wake(gfar_irq(grp, RX)->irq);
2826
2827 } else {
2828 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2829 gfar_irq(grp, TX)->name, grp);
2830 if (err < 0) {
2831 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2832 gfar_irq(grp, TX)->irq);
2833 goto err_irq_fail;
2834 }
2835 enable_irq_wake(gfar_irq(grp, TX)->irq);
2836 }
2837
2838 return 0;
2839
2840 rx_irq_fail:
2841 free_irq(gfar_irq(grp, TX)->irq, grp);
2842 tx_irq_fail:
2843 free_irq(gfar_irq(grp, ER)->irq, grp);
2844 err_irq_fail:
2845 return err;
2846
2847 }
2848
2849 static void gfar_free_irq(struct gfar_private *priv)
2850 {
2851 int i;
2852
2853 /* Free the IRQs */
2854 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2855 for (i = 0; i < priv->num_grps; i++)
2856 free_grp_irqs(&priv->gfargrp[i]);
2857 } else {
2858 for (i = 0; i < priv->num_grps; i++)
2859 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2860 &priv->gfargrp[i]);
2861 }
2862 }
2863
2864 static int gfar_request_irq(struct gfar_private *priv)
2865 {
2866 int err, i, j;
2867
2868 for (i = 0; i < priv->num_grps; i++) {
2869 err = register_grp_irqs(&priv->gfargrp[i]);
2870 if (err) {
2871 for (j = 0; j < i; j++)
2872 free_grp_irqs(&priv->gfargrp[j]);
2873 return err;
2874 }
2875 }
2876
2877 return 0;
2878 }
2879
2880 /* Called when something needs to use the ethernet device
2881 * Returns 0 for success.
2882 */
2883 static int gfar_enet_open(struct net_device *dev)
2884 {
2885 struct gfar_private *priv = netdev_priv(dev);
2886 int err;
2887
2888 err = init_phy(dev);
2889 if (err)
2890 return err;
2891
2892 err = gfar_request_irq(priv);
2893 if (err)
2894 return err;
2895
2896 err = startup_gfar(dev);
2897 if (err)
2898 return err;
2899
2900 return err;
2901 }
2902
2903 /* Stops the kernel queue, and halts the controller */
2904 static int gfar_close(struct net_device *dev)
2905 {
2906 struct gfar_private *priv = netdev_priv(dev);
2907
2908 cancel_work_sync(&priv->reset_task);
2909 stop_gfar(dev);
2910
2911 /* Disconnect from the PHY */
2912 phy_disconnect(dev->phydev);
2913
2914 gfar_free_irq(priv);
2915
2916 return 0;
2917 }
2918
2919 /* Clears each of the exact match registers to zero, so they
2920 * don't interfere with normal reception
2921 */
2922 static void gfar_clear_exact_match(struct net_device *dev)
2923 {
2924 int idx;
2925 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
2926
2927 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2928 gfar_set_mac_for_addr(dev, idx, zero_arr);
2929 }
2930
2931 /* Update the hash table based on the current list of multicast
2932 * addresses we subscribe to. Also, change the promiscuity of
2933 * the device based on the flags (this function is called
2934 * whenever dev->flags is changed).
2935 */
2936 static void gfar_set_multi(struct net_device *dev)
2937 {
2938 struct netdev_hw_addr *ha;
2939 struct gfar_private *priv = netdev_priv(dev);
2940 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2941 u32 tempval;
2942
2943 if (dev->flags & IFF_PROMISC) {
2944 /* Set RCTRL to PROM */
2945 tempval = gfar_read(&regs->rctrl);
2946 tempval |= RCTRL_PROM;
2947 gfar_write(&regs->rctrl, tempval);
2948 } else {
2949 /* Set RCTRL to not PROM */
2950 tempval = gfar_read(&regs->rctrl);
2951 tempval &= ~(RCTRL_PROM);
2952 gfar_write(&regs->rctrl, tempval);
2953 }
2954
2955 if (dev->flags & IFF_ALLMULTI) {
2956 /* Set the hash to rx all multicast frames */
2957 gfar_write(&regs->igaddr0, 0xffffffff);
2958 gfar_write(&regs->igaddr1, 0xffffffff);
2959 gfar_write(&regs->igaddr2, 0xffffffff);
2960 gfar_write(&regs->igaddr3, 0xffffffff);
2961 gfar_write(&regs->igaddr4, 0xffffffff);
2962 gfar_write(&regs->igaddr5, 0xffffffff);
2963 gfar_write(&regs->igaddr6, 0xffffffff);
2964 gfar_write(&regs->igaddr7, 0xffffffff);
2965 gfar_write(&regs->gaddr0, 0xffffffff);
2966 gfar_write(&regs->gaddr1, 0xffffffff);
2967 gfar_write(&regs->gaddr2, 0xffffffff);
2968 gfar_write(&regs->gaddr3, 0xffffffff);
2969 gfar_write(&regs->gaddr4, 0xffffffff);
2970 gfar_write(&regs->gaddr5, 0xffffffff);
2971 gfar_write(&regs->gaddr6, 0xffffffff);
2972 gfar_write(&regs->gaddr7, 0xffffffff);
2973 } else {
2974 int em_num;
2975 int idx;
2976
2977 /* zero out the hash */
2978 gfar_write(&regs->igaddr0, 0x0);
2979 gfar_write(&regs->igaddr1, 0x0);
2980 gfar_write(&regs->igaddr2, 0x0);
2981 gfar_write(&regs->igaddr3, 0x0);
2982 gfar_write(&regs->igaddr4, 0x0);
2983 gfar_write(&regs->igaddr5, 0x0);
2984 gfar_write(&regs->igaddr6, 0x0);
2985 gfar_write(&regs->igaddr7, 0x0);
2986 gfar_write(&regs->gaddr0, 0x0);
2987 gfar_write(&regs->gaddr1, 0x0);
2988 gfar_write(&regs->gaddr2, 0x0);
2989 gfar_write(&regs->gaddr3, 0x0);
2990 gfar_write(&regs->gaddr4, 0x0);
2991 gfar_write(&regs->gaddr5, 0x0);
2992 gfar_write(&regs->gaddr6, 0x0);
2993 gfar_write(&regs->gaddr7, 0x0);
2994
2995 /* If we have extended hash tables, we need to
2996 * clear the exact match registers to prepare for
2997 * setting them
2998 */
2999 if (priv->extended_hash) {
3000 em_num = GFAR_EM_NUM + 1;
3001 gfar_clear_exact_match(dev);
3002 idx = 1;
3003 } else {
3004 idx = 0;
3005 em_num = 0;
3006 }
3007
3008 if (netdev_mc_empty(dev))
3009 return;
3010
3011 /* Parse the list, and set the appropriate bits */
3012 netdev_for_each_mc_addr(ha, dev) {
3013 if (idx < em_num) {
3014 gfar_set_mac_for_addr(dev, idx, ha->addr);
3015 idx++;
3016 } else
3017 gfar_set_hash_for_addr(dev, ha->addr);
3018 }
3019 }
3020 }
3021
3022 void gfar_mac_reset(struct gfar_private *priv)
3023 {
3024 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3025 u32 tempval;
3026
3027 /* Reset MAC layer */
3028 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3029
3030 /* We need to delay at least 3 TX clocks */
3031 udelay(3);
3032
3033 /* the soft reset bit is not self-resetting, so we need to
3034 * clear it before resuming normal operation
3035 */
3036 gfar_write(&regs->maccfg1, 0);
3037
3038 udelay(3);
3039
3040 gfar_rx_offload_en(priv);
3041
3042 /* Initialize the max receive frame/buffer lengths */
3043 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3044 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3045 
3046 /* Initialize the Minimum Frame Length Register */
3047 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3048
3049 /* Initialize MACCFG2. */
3050 tempval = MACCFG2_INIT_SETTINGS;
3051
3052 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3053 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
3054 * and by checking RxBD[LG] and discarding larger than MAXFRM.
3055 */
3056 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3057 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3058
3059 gfar_write(&regs->maccfg2, tempval);
3060
3061 /* Clear mac addr hash registers */
3062 gfar_write(&regs->igaddr0, 0);
3063 gfar_write(&regs->igaddr1, 0);
3064 gfar_write(&regs->igaddr2, 0);
3065 gfar_write(&regs->igaddr3, 0);
3066 gfar_write(&regs->igaddr4, 0);
3067 gfar_write(&regs->igaddr5, 0);
3068 gfar_write(&regs->igaddr6, 0);
3069 gfar_write(&regs->igaddr7, 0);
3070 
3071 gfar_write(&regs->gaddr0, 0);
3072 gfar_write(&regs->gaddr1, 0);
3073 gfar_write(&regs->gaddr2, 0);
3074 gfar_write(&regs->gaddr3, 0);
3075 gfar_write(&regs->gaddr4, 0);
3076 gfar_write(&regs->gaddr5, 0);
3077 gfar_write(&regs->gaddr6, 0);
3078 gfar_write(&regs->gaddr7, 0);
3079
3080 if (priv->extended_hash)
3081 gfar_clear_exact_match(priv->ndev);
3082
3083 gfar_mac_rx_config(priv);
3084
3085 gfar_mac_tx_config(priv);
3086
3087 gfar_set_mac_address(priv->ndev);
3088
3089 gfar_set_multi(priv->ndev);
3090
3091 /* clear ievent and imask before configuring coalescing */
3092 gfar_ints_disable(priv);
3093
3094 /* Configure the coalescing support */
3095 gfar_configure_coalescing_all(priv);
3096 }
3097
3098 static void gfar_hw_init(struct gfar_private *priv)
3099 {
3100 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3101 u32 attrs;
3102
3103 /* Stop the DMA engine now, in case it was running before
3104 * (The firmware could have used it, and left it running).
3105 */
3106 gfar_halt(priv);
3107
3108 gfar_mac_reset(priv);
3109
3110 /* Zero out the RMON MIB registers if the device has them */
3111 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3112 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3113
3114 /* Mask off the CAM interrupts */
3115 gfar_write(&regs->rmon.cam1, 0xffffffff);
3116 gfar_write(&regs->rmon.cam2, 0xffffffff);
3117 }
3118
3119 /* Initialize ECNTRL */
3120 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3121
3122 /* Set the extraction length and index */
3123 attrs = ATTRELI_EL(priv->rx_stash_size) |
3124 ATTRELI_EI(priv->rx_stash_index);
3125
3126 gfar_write(&regs->attreli, attrs);
3127
3128 /* Start with defaults, and add stashing
3129 * depending on driver parameters
3130 */
3131 attrs = ATTR_INIT_SETTINGS;
3132
3133 if (priv->bd_stash_en)
3134 attrs |= ATTR_BDSTASH;
3135
3136 if (priv->rx_stash_size != 0)
3137 attrs |= ATTR_BUFSTASH;
3138
3139 gfar_write(&regs->attr, attrs);
3140
3141 /* FIFO configs */
3142 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3143 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3144 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3145
3146 /* Program the interrupt steering regs, only for MG devices */
3147 if (priv->num_grps > 1)
3148 gfar_write_isrg(priv);
3149 }
3150
3151 static const struct net_device_ops gfar_netdev_ops = {
3152 .ndo_open = gfar_enet_open,
3153 .ndo_start_xmit = gfar_start_xmit,
3154 .ndo_stop = gfar_close,
3155 .ndo_change_mtu = gfar_change_mtu,
3156 .ndo_set_features = gfar_set_features,
3157 .ndo_set_rx_mode = gfar_set_multi,
3158 .ndo_tx_timeout = gfar_timeout,
3159 .ndo_do_ioctl = gfar_ioctl,
3160 .ndo_get_stats = gfar_get_stats,
3161 .ndo_change_carrier = fixed_phy_change_carrier,
3162 .ndo_set_mac_address = gfar_set_mac_addr,
3163 .ndo_validate_addr = eth_validate_addr,
3164 #ifdef CONFIG_NET_POLL_CONTROLLER
3165 .ndo_poll_controller = gfar_netpoll,
3166 #endif
3167 };
3168
3169 /* Set up the ethernet device structure, private data,
3170 * and anything else we need before we start
3171 */
3172 static int gfar_probe(struct platform_device *ofdev)
3173 {
3174 struct device_node *np = ofdev->dev.of_node;
3175 struct net_device *dev = NULL;
3176 struct gfar_private *priv = NULL;
3177 int err = 0, i;
3178
3179 err = gfar_of_init(ofdev, &dev);
3180
3181 if (err)
3182 return err;
3183
3184 priv = netdev_priv(dev);
3185 priv->ndev = dev;
3186 priv->ofdev = ofdev;
3187 priv->dev = &ofdev->dev;
3188 SET_NETDEV_DEV(dev, &ofdev->dev);
3189
3190 INIT_WORK(&priv->reset_task, gfar_reset_task);
3191
3192 platform_set_drvdata(ofdev, priv);
3193
3194 gfar_detect_errata(priv);
3195
3196 /* Set the dev->base_addr to the gfar reg region */
3197 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3198
3199 /* Fill in the dev structure */
3200 dev->watchdog_timeo = TX_TIMEOUT;
3201 /* MTU range: 50 - 9586 */
3202 dev->mtu = 1500;
3203 dev->min_mtu = 50;
3204 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3205 dev->netdev_ops = &gfar_netdev_ops;
3206 dev->ethtool_ops = &gfar_ethtool_ops;
3207
3208 /* Register for NAPI: we register Rx and Tx NAPI contexts for each group */
3209 for (i = 0; i < priv->num_grps; i++) {
3210 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3211 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3212 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3213 gfar_poll_tx_sq, 2);
3214 }
3215
3216 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3217 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3218 NETIF_F_RXCSUM;
3219 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3220 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3221 }
3222
3223 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3224 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3225 NETIF_F_HW_VLAN_CTAG_RX;
3226 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3227 }
3228
3229 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3230
3231 gfar_init_addr_hash_table(priv);
3232
3233 /* Insert receive time stamps into the padding alignment bytes,
3234 * plus 2 bytes of padding to ensure CPU alignment.
3235 */
3236 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3237 priv->padding = 8 + DEFAULT_PADDING;
3238
3239 if (dev->features & NETIF_F_IP_CSUM ||
3240 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3241 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3242
3243 /* Initializing some of the rx/tx queue level parameters */
3244 for (i = 0; i < priv->num_tx_queues; i++) {
3245 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3246 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3247 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3248 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3249 }
3250
3251 for (i = 0; i < priv->num_rx_queues; i++) {
3252 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3253 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3254 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3255 }
3256
3257 /* Always enable rx filer if available */
3258 priv->rx_filer_enable =
3259 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3260 /* Enable most messages by default */
3261 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3262 /* use priority h/w tx queue scheduling for single queue devices */
3263 if (priv->num_tx_queues == 1)
3264 priv->prio_sched_en = 1;
3265
3266 set_bit(GFAR_DOWN, &priv->state);
3267
3268 gfar_hw_init(priv);
3269
3270 /* Carrier starts down, phylib will bring it up */
3271 netif_carrier_off(dev);
3272
3273 err = register_netdev(dev);
3274
3275 if (err) {
3276 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3277 goto register_fail;
3278 }
3279
3280 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3281 priv->wol_supported |= GFAR_WOL_MAGIC;
3282
3283 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3284 priv->rx_filer_enable)
3285 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3286
3287 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3288
3289 /* fill out IRQ number and name fields */
3290 for (i = 0; i < priv->num_grps; i++) {
3291 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3292 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3293 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3294 dev->name, "_g", '0' + i, "_tx");
3295 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3296 dev->name, "_g", '0' + i, "_rx");
3297 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3298 dev->name, "_g", '0' + i, "_er");
3299 } else
3300 strcpy(gfar_irq(grp, TX)->name, dev->name);
3301 }
3302
3303 /* Initialize the filer table */
3304 gfar_init_filer_table(priv);
3305
3306 /* Print out the device info */
3307 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3308
3309 /* Even more device info helps when determining which kernel
3310 * provided which set of benchmarks.
3311 */
3312 netdev_info(dev, "Running with NAPI enabled\n");
3313 for (i = 0; i < priv->num_rx_queues; i++)
3314 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3315 i, priv->rx_queue[i]->rx_ring_size);
3316 for (i = 0; i < priv->num_tx_queues; i++)
3317 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3318 i, priv->tx_queue[i]->tx_ring_size);
3319
3320 return 0;
3321
3322 register_fail:
3323 if (of_phy_is_fixed_link(np))
3324 of_phy_deregister_fixed_link(np);
3325 unmap_group_regs(priv);
3326 gfar_free_rx_queues(priv);
3327 gfar_free_tx_queues(priv);
3328 of_node_put(priv->phy_node);
3329 of_node_put(priv->tbi_node);
3330 free_gfar_dev(priv);
3331 return err;
3332 }
3333
3334 static int gfar_remove(struct platform_device *ofdev)
3335 {
3336 struct gfar_private *priv = platform_get_drvdata(ofdev);
3337 struct device_node *np = ofdev->dev.of_node;
3338
3339 of_node_put(priv->phy_node);
3340 of_node_put(priv->tbi_node);
3341
3342 unregister_netdev(priv->ndev);
3343
3344 if (of_phy_is_fixed_link(np))
3345 of_phy_deregister_fixed_link(np);
3346
3347 unmap_group_regs(priv);
3348 gfar_free_rx_queues(priv);
3349 gfar_free_tx_queues(priv);
3350 free_gfar_dev(priv);
3351
3352 return 0;
3353 }
3354
3355 #ifdef CONFIG_PM
3356
3357 static void __gfar_filer_disable(struct gfar_private *priv)
3358 {
3359 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3360 u32 temp;
3361
3362 temp = gfar_read(&regs->rctrl);
3363 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3364 gfar_write(&regs->rctrl, temp);
3365 }
3366
3367 static void __gfar_filer_enable(struct gfar_private *priv)
3368 {
3369 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3370 u32 temp;
3371
3372 temp = gfar_read(&regs->rctrl);
3373 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3374 gfar_write(&regs->rctrl, temp);
3375 }
3376
3377 /* Filer rules implementing wol capabilities */
3378 static void gfar_filer_config_wol(struct gfar_private *priv)
3379 {
3380 unsigned int i;
3381 u32 rqfcr;
3382
3383 __gfar_filer_disable(priv);
3384
3385 /* clear the filer table, reject any packet by default */
3386 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3387 for (i = 0; i <= MAX_FILER_IDX; i++)
3388 gfar_write_filer(priv, i, rqfcr, 0);
3389
3390 i = 0;
3391 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3392 /* unicast packet, accept it */
3393 struct net_device *ndev = priv->ndev;
3394 /* get the default rx queue index */
3395 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3396 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3397 (ndev->dev_addr[1] << 8) |
3398 ndev->dev_addr[2];
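/* Two chained rules match the station MAC address: the first compares
 * the upper three bytes (DAH) and ANDs with the next rule, which
 * compares the lower three bytes (DAL) and raises the filer general
 * purpose interrupt on a full match.
 */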
3399
3400 rqfcr = (qindex << 10) | RQFCR_AND |
3401 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3402
3403 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3404
3405 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3406 (ndev->dev_addr[4] << 8) |
3407 ndev->dev_addr[5];
3408 rqfcr = (qindex << 10) | RQFCR_GPI |
3409 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3410 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3411 }
3412
3413 __gfar_filer_enable(priv);
3414 }
3415
3416 static void gfar_filer_restore_table(struct gfar_private *priv)
3417 {
3418 u32 rqfcr, rqfpr;
3419 unsigned int i;
3420
3421 __gfar_filer_disable(priv);
3422
3423 for (i = 0; i <= MAX_FILER_IDX; i++) {
3424 rqfcr = priv->ftp_rqfcr[i];
3425 rqfpr = priv->ftp_rqfpr[i];
3426 gfar_write_filer(priv, i, rqfcr, rqfpr);
3427 }
3428
3429 __gfar_filer_enable(priv);
3430 }
3431
3432 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
3433 static void gfar_start_wol_filer(struct gfar_private *priv)
3434 {
3435 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3436 u32 tempval;
3437 int i = 0;
3438
3439 /* Enable Rx hw queues */
3440 gfar_write(&regs->rqueue, priv->rqueue);
3441 
3442 /* Initialize DMACTRL to have WWR and WOP */
3443 tempval = gfar_read(&regs->dmactrl);
3444 tempval |= DMACTRL_INIT_SETTINGS;
3445 gfar_write(&regs->dmactrl, tempval);
3446 
3447 /* Make sure we aren't stopped */
3448 tempval = gfar_read(&regs->dmactrl);
3449 tempval &= ~DMACTRL_GRS;
3450 gfar_write(&regs->dmactrl, tempval);
3451
3452 for (i = 0; i < priv->num_grps; i++) {
3453 regs = priv->gfargrp[i].regs;
3454 /* Clear RHLT, so that the DMA starts polling now */
3455 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3456 /* enable the Filer General Purpose Interrupt */
3457 gfar_write(&regs->imask, IMASK_FGPI);
3458 }
3459
3460 /* Enable Rx DMA */
3461 tempval = gfar_read(&regs->maccfg1);
3462 tempval |= MACCFG1_RX_EN;
3463 gfar_write(&regs->maccfg1, tempval);
3464 }
3465
3466 static int gfar_suspend(struct device *dev)
3467 {
3468 struct gfar_private *priv = dev_get_drvdata(dev);
3469 struct net_device *ndev = priv->ndev;
3470 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3471 u32 tempval;
3472 u16 wol = priv->wol_opts;
3473
3474 if (!netif_running(ndev))
3475 return 0;
3476
3477 disable_napi(priv);
3478 netif_tx_lock(ndev);
3479 netif_device_detach(ndev);
3480 netif_tx_unlock(ndev);
3481
3482 gfar_halt(priv);
3483
3484 if (wol & GFAR_WOL_MAGIC) {
3485 /* Enable interrupt on Magic Packet */
3486 gfar_write(&regs->imask, IMASK_MAG);
3487 
3488 /* Enable Magic Packet mode */
3489 tempval = gfar_read(&regs->maccfg2);
3490 tempval |= MACCFG2_MPEN;
3491 gfar_write(&regs->maccfg2, tempval);
3492 
3493 /* re-enable the Rx block */
3494 tempval = gfar_read(&regs->maccfg1);
3495 tempval |= MACCFG1_RX_EN;
3496 gfar_write(&regs->maccfg1, tempval);
3497
3498 } else if (wol & GFAR_WOL_FILER_UCAST) {
3499 gfar_filer_config_wol(priv);
3500 gfar_start_wol_filer(priv);
3501
3502 } else {
3503 phy_stop(ndev->phydev);
3504 }
3505
3506 return 0;
3507 }
3508
3509 static int gfar_resume(struct device *dev)
3510 {
3511 struct gfar_private *priv = dev_get_drvdata(dev);
3512 struct net_device *ndev = priv->ndev;
3513 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3514 u32 tempval;
3515 u16 wol = priv->wol_opts;
3516
3517 if (!netif_running(ndev))
3518 return 0;
3519
3520 if (wol & GFAR_WOL_MAGIC) {
3521 /* Disable Magic Packet mode */
3522 tempval = gfar_read(&regs->maccfg2);
3523 tempval &= ~MACCFG2_MPEN;
3524 gfar_write(&regs->maccfg2, tempval);
3525
3526 } else if (wol & GFAR_WOL_FILER_UCAST) {
3527 /* need to stop rx only, tx is already down */
3528 gfar_halt(priv);
3529 gfar_filer_restore_table(priv);
3530
3531 } else {
3532 phy_start(ndev->phydev);
3533 }
3534
3535 gfar_start(priv);
3536
3537 netif_device_attach(ndev);
3538 enable_napi(priv);
3539
3540 return 0;
3541 }
3542
3543 static int gfar_restore(struct device *dev)
3544 {
3545 struct gfar_private *priv = dev_get_drvdata(dev);
3546 struct net_device *ndev = priv->ndev;
3547
3548 if (!netif_running(ndev)) {
3549 netif_device_attach(ndev);
3550
3551 return 0;
3552 }
3553
3554 gfar_init_bds(ndev);
3555
3556 gfar_mac_reset(priv);
3557
3558 gfar_init_tx_rx_base(priv);
3559
3560 gfar_start(priv);
3561
3562 priv->oldlink = 0;
3563 priv->oldspeed = 0;
3564 priv->oldduplex = -1;
3565
3566 if (ndev->phydev)
3567 phy_start(ndev->phydev);
3568
3569 netif_device_attach(ndev);
3570 enable_napi(priv);
3571
3572 return 0;
3573 }
3574
3575 static const struct dev_pm_ops gfar_pm_ops = {
3576 .suspend = gfar_suspend,
3577 .resume = gfar_resume,
3578 .freeze = gfar_suspend,
3579 .thaw = gfar_resume,
3580 .restore = gfar_restore,
3581 };
3582
3583 #define GFAR_PM_OPS (&gfar_pm_ops)
3584
3585 #else
3586
3587 #define GFAR_PM_OPS NULL
3588
3589 #endif
3590
3591 static const struct of_device_id gfar_match[] =
3592 {
3593 {
3594 .type = "network",
3595 .compatible = "gianfar",
3596 },
3597 {
3598 .compatible = "fsl,etsec2",
3599 },
3600 {},
3601 };
3602 MODULE_DEVICE_TABLE(of, gfar_match);
3603
3604 /* Structure for a device driver */
3605 static struct platform_driver gfar_driver = {
3606 .driver = {
3607 .name = "fsl-gianfar",
3608 .pm = GFAR_PM_OPS,
3609 .of_match_table = gfar_match,
3610 },
3611 .probe = gfar_probe,
3612 .remove = gfar_remove,
3613 };
3614
3615 module_platform_driver(gfar_driver);
3616