1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* drivers/net/ethernet/freescale/gianfar.c
3  *
4  * Gianfar Ethernet Driver
5  * This driver is designed for the non-CPM ethernet controllers
6  * on the 85xx and 83xx family of integrated processors
7  * Based on 8260_io/fcc_enet.c
8  *
9  * Author: Andy Fleming
10  * Maintainer: Kumar Gala
11  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12  *
13  * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14  * Copyright 2007 MontaVista Software, Inc.
15  *
16  *  Gianfar:  AKA Lambda Draconis, "Dragon"
17  *  RA 11 31 24.2
18  *  Dec +69 19 52
19  *  V 3.84
20  *  B-V +1.62
21  *
22  *  Theory of operation
23  *
24  *  The driver is initialized through of_device. Configuration information
25  *  is therefore conveyed through an OF-style device tree.
26  *
27  *  The Gianfar Ethernet Controller uses a ring of buffer
28  *  descriptors.  The beginning is indicated by a register
29  *  pointing to the physical address of the start of the ring.
30  *  The end is determined by a "wrap" bit being set in the
31  *  last descriptor of the ring.
32  *
33  *  When a packet is received, the RXF bit in the
34  *  IEVENT register is set, triggering an interrupt when the
35  *  corresponding bit in the IMASK register is also set (if
36  *  interrupt coalescing is active, then the interrupt may not
37  *  happen immediately, but will wait until either a set number
38  *  of frames or amount of time have passed).  In NAPI, the
39  *  interrupt handler will signal there is work to be done, and
40  *  exit. This method will start at the last known empty
41  *  descriptor, and process every subsequent descriptor until there
42  *  are none left with data (NAPI will stop after a set number of
43  *  packets to give time to other tasks, but will eventually
44  *  process all the packets).  The data arrives inside a
45  *  pre-allocated skb, and so after the skb is passed up to the
46  *  stack, a new skb must be allocated, and the address field in
47  *  the buffer descriptor must be updated to indicate this new
48  *  skb.
49  *
50  *  When the kernel requests that a packet be transmitted, the
51  *  driver starts where it left off last time, and points the
52  *  descriptor at the buffer which was passed in.  The driver
53  *  then informs the DMA engine that there are packets ready to
54  *  be transmitted.  Once the controller is finished transmitting
55  *  the packet, an interrupt may be triggered (under the same
56  *  conditions as for reception, but depending on the TXF bit).
57  *  The driver then cleans up the buffer.
58  */
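/* Illustrative sketch (not actual driver code): the receive path described
 * above boils down to walking the descriptor ring until an EMPTY descriptor
 * (still owned by hardware) is reached.  The names below are simplified
 * stand-ins for the rxbd8/gfar helpers defined later in this file:
 *
 *	while (!(bd[i].lstatus & RXBD_EMPTY) && budget--) {
 *		hand buffer i up the stack;
 *		attach a fresh buffer to descriptor i;
 *		set RXBD_EMPTY again (plus RXBD_WRAP on the last entry);
 *		i = (i + 1) % ring_size;
 *	}
 *
 * Transmit works the same way in the other direction: software fills
 * descriptors and sets READY, and the controller clears READY as the
 * frames go out on the wire.
 */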
59 
60 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61 #define DEBUG
62 
63 #include <linux/kernel.h>
64 #include <linux/string.h>
65 #include <linux/errno.h>
66 #include <linux/unistd.h>
67 #include <linux/slab.h>
68 #include <linux/interrupt.h>
69 #include <linux/delay.h>
70 #include <linux/netdevice.h>
71 #include <linux/etherdevice.h>
72 #include <linux/skbuff.h>
73 #include <linux/if_vlan.h>
74 #include <linux/spinlock.h>
75 #include <linux/mm.h>
76 #include <linux/of_address.h>
77 #include <linux/of_irq.h>
78 #include <linux/of_mdio.h>
79 #include <linux/of_platform.h>
80 #include <linux/ip.h>
81 #include <linux/tcp.h>
82 #include <linux/udp.h>
83 #include <linux/in.h>
84 #include <linux/net_tstamp.h>
85 
86 #include <asm/io.h>
87 #ifdef CONFIG_PPC
88 #include <asm/reg.h>
89 #include <asm/mpc85xx.h>
90 #endif
91 #include <asm/irq.h>
92 #include <linux/uaccess.h>
93 #include <linux/module.h>
94 #include <linux/dma-mapping.h>
95 #include <linux/crc32.h>
96 #include <linux/mii.h>
97 #include <linux/phy.h>
98 #include <linux/phy_fixed.h>
99 #include <linux/of.h>
100 #include <linux/of_net.h>
101 
102 #include "gianfar.h"
103 
104 #define TX_TIMEOUT      (5*HZ)
105 
106 const char gfar_driver_version[] = "2.0";
107 
108 MODULE_AUTHOR("Freescale Semiconductor, Inc");
109 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
110 MODULE_LICENSE("GPL");
111 
112 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 			    dma_addr_t buf)
114 {
115 	u32 lstatus;
116 
117 	bdp->bufPtr = cpu_to_be32(buf);
118 
119 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
120 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
121 		lstatus |= BD_LFLAG(RXBD_WRAP);
122 
123 	gfar_wmb();
124 
125 	bdp->lstatus = cpu_to_be32(lstatus);
126 }
127 
128 static void gfar_init_tx_rx_base(struct gfar_private *priv)
129 {
130 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
131 	u32 __iomem *baddr;
132 	int i;
133 
134 	baddr = &regs->tbase0;
135 	for (i = 0; i < priv->num_tx_queues; i++) {
136 		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
137 		baddr += 2;
138 	}
139 
140 	baddr = &regs->rbase0;
141 	for (i = 0; i < priv->num_rx_queues; i++) {
142 		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
143 		baddr += 2;
144 	}
145 }
146 
147 static void gfar_init_rqprm(struct gfar_private *priv)
148 {
149 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
150 	u32 __iomem *baddr;
151 	int i;
152 
153 	baddr = &regs->rqprm0;
154 	for (i = 0; i < priv->num_rx_queues; i++) {
155 		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
156 			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
157 		baddr++;
158 	}
159 }
160 
161 static void gfar_rx_offload_en(struct gfar_private *priv)
162 {
163 	/* set this when rx hw offload (TOE) functions are being used */
164 	priv->uses_rxfcb = 0;
165 
166 	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
167 		priv->uses_rxfcb = 1;
168 
169 	if (priv->hwts_rx_en || priv->rx_filer_enable)
170 		priv->uses_rxfcb = 1;
171 }
172 
173 static void gfar_mac_rx_config(struct gfar_private *priv)
174 {
175 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
176 	u32 rctrl = 0;
177 
178 	if (priv->rx_filer_enable) {
179 		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
180 		/* Program the RIR0 reg with the required distribution */
181 		if (priv->poll_mode == GFAR_SQ_POLLING)
182 			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
183 		else /* GFAR_MQ_POLLING */
184 			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
185 	}
186 
187 	/* Restore PROMISC mode */
188 	if (priv->ndev->flags & IFF_PROMISC)
189 		rctrl |= RCTRL_PROM;
190 
191 	if (priv->ndev->features & NETIF_F_RXCSUM)
192 		rctrl |= RCTRL_CHECKSUMMING;
193 
194 	if (priv->extended_hash)
195 		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
196 
197 	if (priv->padding) {
198 		rctrl &= ~RCTRL_PAL_MASK;
199 		rctrl |= RCTRL_PADDING(priv->padding);
200 	}
201 
202 	/* Enable HW time stamping if requested from user space */
203 	if (priv->hwts_rx_en)
204 		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
205 
206 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
207 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
208 
209 	/* Clear the LFC bit */
210 	gfar_write(&regs->rctrl, rctrl);
211 	/* Init flow control threshold values */
212 	gfar_init_rqprm(priv);
213 	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
214 	rctrl |= RCTRL_LFC;
215 
216 	/* Init rctrl based on our settings */
217 	gfar_write(&regs->rctrl, rctrl);
218 }
219 
220 static void gfar_mac_tx_config(struct gfar_private *priv)
221 {
222 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
223 	u32 tctrl = 0;
224 
225 	if (priv->ndev->features & NETIF_F_IP_CSUM)
226 		tctrl |= TCTRL_INIT_CSUM;
227 
228 	if (priv->prio_sched_en)
229 		tctrl |= TCTRL_TXSCHED_PRIO;
230 	else {
231 		tctrl |= TCTRL_TXSCHED_WRRS;
232 		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
233 		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
234 	}
235 
236 	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
237 		tctrl |= TCTRL_VLINS;
238 
239 	gfar_write(&regs->tctrl, tctrl);
240 }
241 
242 static void gfar_configure_coalescing(struct gfar_private *priv,
243 			       unsigned long tx_mask, unsigned long rx_mask)
244 {
245 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
246 	u32 __iomem *baddr;
247 
248 	if (priv->mode == MQ_MG_MODE) {
249 		int i = 0;
250 
251 		baddr = &regs->txic0;
252 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
253 			gfar_write(baddr + i, 0);
254 			if (likely(priv->tx_queue[i]->txcoalescing))
255 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
256 		}
257 
258 		baddr = &regs->rxic0;
259 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
260 			gfar_write(baddr + i, 0);
261 			if (likely(priv->rx_queue[i]->rxcoalescing))
262 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
263 		}
264 	} else {
265 		/* Backward compatible case -- even if we enable
266 		 * multiple queues, there's only a single reg to program
267 		 */
268 		gfar_write(&regs->txic, 0);
269 		if (likely(priv->tx_queue[0]->txcoalescing))
270 			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
271 
272 		gfar_write(&regs->rxic, 0);
273 		if (unlikely(priv->rx_queue[0]->rxcoalescing))
274 			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
275 	}
276 }
277 
278 static void gfar_configure_coalescing_all(struct gfar_private *priv)
279 {
280 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
281 }
282 
283 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
284 {
285 	struct gfar_private *priv = netdev_priv(dev);
286 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
287 	unsigned long tx_packets = 0, tx_bytes = 0;
288 	int i;
289 
290 	for (i = 0; i < priv->num_rx_queues; i++) {
291 		rx_packets += priv->rx_queue[i]->stats.rx_packets;
292 		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
293 		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
294 	}
295 
296 	dev->stats.rx_packets = rx_packets;
297 	dev->stats.rx_bytes   = rx_bytes;
298 	dev->stats.rx_dropped = rx_dropped;
299 
300 	for (i = 0; i < priv->num_tx_queues; i++) {
301 		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
302 		tx_packets += priv->tx_queue[i]->stats.tx_packets;
303 	}
304 
305 	dev->stats.tx_bytes   = tx_bytes;
306 	dev->stats.tx_packets = tx_packets;
307 
308 	return &dev->stats;
309 }
310 
311 /* Set the appropriate hash bit for the given addr */
312 /* The algorithm works like so:
313  * 1) Take the Destination Address (ie the multicast address), and
314  * do a CRC on it (little endian), and reverse the bits of the
315  * result.
316  * 2) Use the 8 most significant bits as a hash into a 256-entry
317  * table.  The table is controlled through 8 32-bit registers:
318  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
319  * entry 255.  This means that the 3 most significant bits in the
320  * hash select which gaddr register to use, and the 5 other bits
321  * indicate which bit (assuming an IBM numbering scheme, which
322  * for PowerPC (tm) is usually the case) in the register holds
323  * the entry.
324  */
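/* Worked example (illustrative only, for the non-extended 8-register,
 * 256-entry case, i.e. hash_width == 8): if the CRC result computed in
 * gfar_set_hash_for_addr() below is 0xD4000000, its top 8 bits are
 * 0xD4 = 0b11010100, so
 *	whichreg = top 3 bits  = 0b110   = 6  -> gaddr6
 *	whichbit = next 5 bits = 0b10100 = 20 -> IBM bit 20
 * and the function ORs in (1 << (31 - 20)) to set that bit.
 */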
325 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
326 {
327 	u32 tempval;
328 	struct gfar_private *priv = netdev_priv(dev);
329 	u32 result = ether_crc(ETH_ALEN, addr);
330 	int width = priv->hash_width;
331 	u8 whichbit = (result >> (32 - width)) & 0x1f;
332 	u8 whichreg = result >> (32 - width + 5);
333 	u32 value = (1 << (31-whichbit));
334 
335 	tempval = gfar_read(priv->hash_regs[whichreg]);
336 	tempval |= value;
337 	gfar_write(priv->hash_regs[whichreg], tempval);
338 }
339 
340 /* There are multiple MAC Address register pairs on some controllers
341  * This function sets the numth pair to a given address
342  */
343 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
344 				  const u8 *addr)
345 {
346 	struct gfar_private *priv = netdev_priv(dev);
347 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
348 	u32 tempval;
349 	u32 __iomem *macptr = &regs->macstnaddr1;
350 
351 	macptr += num*2;
352 
353 	/* For a station address of 0x12345678ABCD in transmission
354 	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
355 	 * MACnADDR2 is set to 0x34120000.
356 	 */
357 	tempval = (addr[5] << 24) | (addr[4] << 16) |
358 		  (addr[3] << 8)  |  addr[2];
359 
360 	gfar_write(macptr, tempval);
361 
362 	tempval = (addr[1] << 24) | (addr[0] << 16);
363 
364 	gfar_write(macptr+1, tempval);
365 }
366 
367 static int gfar_set_mac_addr(struct net_device *dev, void *p)
368 {
369 	eth_mac_addr(dev, p);
370 
371 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
372 
373 	return 0;
374 }
375 
376 static void gfar_ints_disable(struct gfar_private *priv)
377 {
378 	int i;
379 	for (i = 0; i < priv->num_grps; i++) {
380 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
381 		/* Clear IEVENT */
382 		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
383 
384 		/* Initialize IMASK */
385 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
386 	}
387 }
388 
389 static void gfar_ints_enable(struct gfar_private *priv)
390 {
391 	int i;
392 	for (i = 0; i < priv->num_grps; i++) {
393 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
394 		/* Unmask the interrupts we look for */
395 		gfar_write(&regs->imask, IMASK_DEFAULT);
396 	}
397 }
398 
399 static int gfar_alloc_tx_queues(struct gfar_private *priv)
400 {
401 	int i;
402 
403 	for (i = 0; i < priv->num_tx_queues; i++) {
404 		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
405 					    GFP_KERNEL);
406 		if (!priv->tx_queue[i])
407 			return -ENOMEM;
408 
409 		priv->tx_queue[i]->tx_skbuff = NULL;
410 		priv->tx_queue[i]->qindex = i;
411 		priv->tx_queue[i]->dev = priv->ndev;
412 		spin_lock_init(&(priv->tx_queue[i]->txlock));
413 	}
414 	return 0;
415 }
416 
417 static int gfar_alloc_rx_queues(struct gfar_private *priv)
418 {
419 	int i;
420 
421 	for (i = 0; i < priv->num_rx_queues; i++) {
422 		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
423 					    GFP_KERNEL);
424 		if (!priv->rx_queue[i])
425 			return -ENOMEM;
426 
427 		priv->rx_queue[i]->qindex = i;
428 		priv->rx_queue[i]->ndev = priv->ndev;
429 	}
430 	return 0;
431 }
432 
433 static void gfar_free_tx_queues(struct gfar_private *priv)
434 {
435 	int i;
436 
437 	for (i = 0; i < priv->num_tx_queues; i++)
438 		kfree(priv->tx_queue[i]);
439 }
440 
441 static void gfar_free_rx_queues(struct gfar_private *priv)
442 {
443 	int i;
444 
445 	for (i = 0; i < priv->num_rx_queues; i++)
446 		kfree(priv->rx_queue[i]);
447 }
448 
449 static void unmap_group_regs(struct gfar_private *priv)
450 {
451 	int i;
452 
453 	for (i = 0; i < MAXGROUPS; i++)
454 		if (priv->gfargrp[i].regs)
455 			iounmap(priv->gfargrp[i].regs);
456 }
457 
458 static void free_gfar_dev(struct gfar_private *priv)
459 {
460 	int i, j;
461 
462 	for (i = 0; i < priv->num_grps; i++)
463 		for (j = 0; j < GFAR_NUM_IRQS; j++) {
464 			kfree(priv->gfargrp[i].irqinfo[j]);
465 			priv->gfargrp[i].irqinfo[j] = NULL;
466 		}
467 
468 	free_netdev(priv->ndev);
469 }
470 
471 static void disable_napi(struct gfar_private *priv)
472 {
473 	int i;
474 
475 	for (i = 0; i < priv->num_grps; i++) {
476 		napi_disable(&priv->gfargrp[i].napi_rx);
477 		napi_disable(&priv->gfargrp[i].napi_tx);
478 	}
479 }
480 
481 static void enable_napi(struct gfar_private *priv)
482 {
483 	int i;
484 
485 	for (i = 0; i < priv->num_grps; i++) {
486 		napi_enable(&priv->gfargrp[i].napi_rx);
487 		napi_enable(&priv->gfargrp[i].napi_tx);
488 	}
489 }
490 
491 static int gfar_parse_group(struct device_node *np,
492 			    struct gfar_private *priv, const char *model)
493 {
494 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
495 	int i;
496 
497 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
498 		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
499 					  GFP_KERNEL);
500 		if (!grp->irqinfo[i])
501 			return -ENOMEM;
502 	}
503 
504 	grp->regs = of_iomap(np, 0);
505 	if (!grp->regs)
506 		return -ENOMEM;
507 
508 	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
509 
510 	/* If we aren't the FEC we have multiple interrupts */
511 	if (model && strcasecmp(model, "FEC")) {
512 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
513 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
514 		if (!gfar_irq(grp, TX)->irq ||
515 		    !gfar_irq(grp, RX)->irq ||
516 		    !gfar_irq(grp, ER)->irq)
517 			return -EINVAL;
518 	}
519 
520 	grp->priv = priv;
521 	spin_lock_init(&grp->grplock);
522 	if (priv->mode == MQ_MG_MODE) {
523 		u32 rxq_mask, txq_mask;
524 		int ret;
525 
526 		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
527 		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
528 
529 		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
530 		if (!ret) {
531 			grp->rx_bit_map = rxq_mask ?
532 			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
533 		}
534 
535 		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
536 		if (!ret) {
537 			grp->tx_bit_map = txq_mask ?
538 			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
539 		}
540 
541 		if (priv->poll_mode == GFAR_SQ_POLLING) {
542 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
543 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
544 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
545 		}
546 	} else {
547 		grp->rx_bit_map = 0xFF;
548 		grp->tx_bit_map = 0xFF;
549 	}
550 
551 	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
552 	 * right to left, so we need to reverse the 8 bits to get the q index
553 	 */
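	/* Example (illustrative): a map of 0x80 selects only queue 0 in the
	 * hardware's MSB-first convention; bitrev8(0x80) == 0x01, so
	 * for_each_set_bit() below then reports bit 0, i.e. queue index 0.
	 */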
554 	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
555 	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
556 
557 	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
558 	 * also assign queues to groups
559 	 */
560 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
561 		if (!grp->rx_queue)
562 			grp->rx_queue = priv->rx_queue[i];
563 		grp->num_rx_queues++;
564 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
565 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
566 		priv->rx_queue[i]->grp = grp;
567 	}
568 
569 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
570 		if (!grp->tx_queue)
571 			grp->tx_queue = priv->tx_queue[i];
572 		grp->num_tx_queues++;
573 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
574 		priv->tqueue |= (TQUEUE_EN0 >> i);
575 		priv->tx_queue[i]->grp = grp;
576 	}
577 
578 	priv->num_grps++;
579 
580 	return 0;
581 }
582 
583 static int gfar_of_group_count(struct device_node *np)
584 {
585 	struct device_node *child;
586 	int num = 0;
587 
588 	for_each_available_child_of_node(np, child)
589 		if (of_node_name_eq(child, "queue-group"))
590 			num++;
591 
592 	return num;
593 }
594 
595 /* Reads the controller's registers to determine what interface
596  * connects it to the PHY.
597  */
598 static phy_interface_t gfar_get_interface(struct net_device *dev)
599 {
600 	struct gfar_private *priv = netdev_priv(dev);
601 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
602 	u32 ecntrl;
603 
604 	ecntrl = gfar_read(&regs->ecntrl);
605 
606 	if (ecntrl & ECNTRL_SGMII_MODE)
607 		return PHY_INTERFACE_MODE_SGMII;
608 
609 	if (ecntrl & ECNTRL_TBI_MODE) {
610 		if (ecntrl & ECNTRL_REDUCED_MODE)
611 			return PHY_INTERFACE_MODE_RTBI;
612 		else
613 			return PHY_INTERFACE_MODE_TBI;
614 	}
615 
616 	if (ecntrl & ECNTRL_REDUCED_MODE) {
617 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
618 			return PHY_INTERFACE_MODE_RMII;
619 		}
620 		else {
621 			phy_interface_t interface = priv->interface;
622 
623 			/* This isn't autodetected right now, so it must
624 			 * be set by the device tree or platform code.
625 			 */
626 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
627 				return PHY_INTERFACE_MODE_RGMII_ID;
628 
629 			return PHY_INTERFACE_MODE_RGMII;
630 		}
631 	}
632 
633 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
634 		return PHY_INTERFACE_MODE_GMII;
635 
636 	return PHY_INTERFACE_MODE_MII;
637 }
638 
639 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
640 {
641 	const char *model;
642 	const void *mac_addr;
643 	int err = 0, i;
644 	struct net_device *dev = NULL;
645 	struct gfar_private *priv = NULL;
646 	struct device_node *np = ofdev->dev.of_node;
647 	struct device_node *child = NULL;
648 	u32 stash_len = 0;
649 	u32 stash_idx = 0;
650 	unsigned int num_tx_qs, num_rx_qs;
651 	unsigned short mode, poll_mode;
652 
653 	if (!np)
654 		return -ENODEV;
655 
656 	if (of_device_is_compatible(np, "fsl,etsec2")) {
657 		mode = MQ_MG_MODE;
658 		poll_mode = GFAR_SQ_POLLING;
659 	} else {
660 		mode = SQ_SG_MODE;
661 		poll_mode = GFAR_SQ_POLLING;
662 	}
663 
664 	if (mode == SQ_SG_MODE) {
665 		num_tx_qs = 1;
666 		num_rx_qs = 1;
667 	} else { /* MQ_MG_MODE */
668 		/* get the actual number of supported groups */
669 		unsigned int num_grps = gfar_of_group_count(np);
670 
671 		if (num_grps == 0 || num_grps > MAXGROUPS) {
672 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
673 				num_grps);
674 			pr_err("Cannot do alloc_etherdev, aborting\n");
675 			return -EINVAL;
676 		}
677 
678 		if (poll_mode == GFAR_SQ_POLLING) {
679 			num_tx_qs = num_grps; /* one txq per int group */
680 			num_rx_qs = num_grps; /* one rxq per int group */
681 		} else { /* GFAR_MQ_POLLING */
682 			u32 tx_queues, rx_queues;
683 			int ret;
684 
685 			/* parse the num of HW tx and rx queues */
686 			ret = of_property_read_u32(np, "fsl,num_tx_queues",
687 						   &tx_queues);
688 			num_tx_qs = ret ? 1 : tx_queues;
689 
690 			ret = of_property_read_u32(np, "fsl,num_rx_queues",
691 						   &rx_queues);
692 			num_rx_qs = ret ? 1 : rx_queues;
693 		}
694 	}
695 
696 	if (num_tx_qs > MAX_TX_QS) {
697 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
698 		       num_tx_qs, MAX_TX_QS);
699 		pr_err("Cannot do alloc_etherdev, aborting\n");
700 		return -EINVAL;
701 	}
702 
703 	if (num_rx_qs > MAX_RX_QS) {
704 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
705 		       num_rx_qs, MAX_RX_QS);
706 		pr_err("Cannot do alloc_etherdev, aborting\n");
707 		return -EINVAL;
708 	}
709 
710 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
711 	dev = *pdev;
712 	if (NULL == dev)
713 		return -ENOMEM;
714 
715 	priv = netdev_priv(dev);
716 	priv->ndev = dev;
717 
718 	priv->mode = mode;
719 	priv->poll_mode = poll_mode;
720 
721 	priv->num_tx_queues = num_tx_qs;
722 	netif_set_real_num_rx_queues(dev, num_rx_qs);
723 	priv->num_rx_queues = num_rx_qs;
724 
725 	err = gfar_alloc_tx_queues(priv);
726 	if (err)
727 		goto tx_alloc_failed;
728 
729 	err = gfar_alloc_rx_queues(priv);
730 	if (err)
731 		goto rx_alloc_failed;
732 
733 	err = of_property_read_string(np, "model", &model);
734 	if (err) {
735 		pr_err("Device model property missing, aborting\n");
736 		goto rx_alloc_failed;
737 	}
738 
739 	/* Init Rx queue filer rule set linked list */
740 	INIT_LIST_HEAD(&priv->rx_list.list);
741 	priv->rx_list.count = 0;
742 	mutex_init(&priv->rx_queue_access);
743 
744 	for (i = 0; i < MAXGROUPS; i++)
745 		priv->gfargrp[i].regs = NULL;
746 
747 	/* Parse and initialize group specific information */
748 	if (priv->mode == MQ_MG_MODE) {
749 		for_each_available_child_of_node(np, child) {
750 			if (!of_node_name_eq(child, "queue-group"))
751 				continue;
752 
753 			err = gfar_parse_group(child, priv, model);
754 			if (err)
755 				goto err_grp_init;
756 		}
757 	} else { /* SQ_SG_MODE */
758 		err = gfar_parse_group(np, priv, model);
759 		if (err)
760 			goto err_grp_init;
761 	}
762 
763 	if (of_property_read_bool(np, "bd-stash")) {
764 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
765 		priv->bd_stash_en = 1;
766 	}
767 
768 	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
769 
770 	if (err == 0)
771 		priv->rx_stash_size = stash_len;
772 
773 	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
774 
775 	if (err == 0)
776 		priv->rx_stash_index = stash_idx;
777 
778 	if (stash_len || stash_idx)
779 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
780 
781 	mac_addr = of_get_mac_address(np);
782 
783 	if (!IS_ERR(mac_addr))
784 		ether_addr_copy(dev->dev_addr, mac_addr);
785 
786 	if (model && !strcasecmp(model, "TSEC"))
787 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
788 				     FSL_GIANFAR_DEV_HAS_COALESCE |
789 				     FSL_GIANFAR_DEV_HAS_RMON |
790 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
791 
792 	if (model && !strcasecmp(model, "eTSEC"))
793 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
794 				     FSL_GIANFAR_DEV_HAS_COALESCE |
795 				     FSL_GIANFAR_DEV_HAS_RMON |
796 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
797 				     FSL_GIANFAR_DEV_HAS_CSUM |
798 				     FSL_GIANFAR_DEV_HAS_VLAN |
799 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
800 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
801 				     FSL_GIANFAR_DEV_HAS_TIMER |
802 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
803 
804 	/* Use PHY connection type from the DT node if one is specified there.
805 	 * rgmii-id really needs to be specified. Other types can be
806 	 * detected by hardware
807 	 */
808 	err = of_get_phy_mode(np);
809 	if (err >= 0)
810 		priv->interface = err;
811 	else
812 		priv->interface = gfar_get_interface(dev);
813 
814 	if (of_find_property(np, "fsl,magic-packet", NULL))
815 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
816 
817 	if (of_get_property(np, "fsl,wake-on-filer", NULL))
818 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
819 
820 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
821 
822 	/* In the case of a fixed PHY, the DT node associated
823 	 * to the PHY is the Ethernet MAC DT node.
824 	 */
825 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
826 		err = of_phy_register_fixed_link(np);
827 		if (err)
828 			goto err_grp_init;
829 
830 		priv->phy_node = of_node_get(np);
831 	}
832 
833 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
834 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
835 
836 	return 0;
837 
838 err_grp_init:
839 	unmap_group_regs(priv);
840 rx_alloc_failed:
841 	gfar_free_rx_queues(priv);
842 tx_alloc_failed:
843 	gfar_free_tx_queues(priv);
844 	free_gfar_dev(priv);
845 	return err;
846 }
847 
848 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
849 				   u32 class)
850 {
851 	u32 rqfpr = FPR_FILER_MASK;
852 	u32 rqfcr = 0x0;
853 
854 	rqfar--;
855 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
856 	priv->ftp_rqfpr[rqfar] = rqfpr;
857 	priv->ftp_rqfcr[rqfar] = rqfcr;
858 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
859 
860 	rqfar--;
861 	rqfcr = RQFCR_CMP_NOMATCH;
862 	priv->ftp_rqfpr[rqfar] = rqfpr;
863 	priv->ftp_rqfcr[rqfar] = rqfcr;
864 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
865 
866 	rqfar--;
867 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
868 	rqfpr = class;
869 	priv->ftp_rqfcr[rqfar] = rqfcr;
870 	priv->ftp_rqfpr[rqfar] = rqfpr;
871 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
872 
873 	rqfar--;
874 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
875 	rqfpr = class;
876 	priv->ftp_rqfcr[rqfar] = rqfcr;
877 	priv->ftp_rqfpr[rqfar] = rqfpr;
878 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
879 
880 	return rqfar;
881 }
882 
883 static void gfar_init_filer_table(struct gfar_private *priv)
884 {
885 	int i = 0x0;
886 	u32 rqfar = MAX_FILER_IDX;
887 	u32 rqfcr = 0x0;
888 	u32 rqfpr = FPR_FILER_MASK;
889 
890 	/* Default rule */
891 	rqfcr = RQFCR_CMP_MATCH;
892 	priv->ftp_rqfcr[rqfar] = rqfcr;
893 	priv->ftp_rqfpr[rqfar] = rqfpr;
894 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
895 
896 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
897 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
898 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
899 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
900 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
901 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
902 
903 	/* cur_filer_idx indicates the first non-masked rule */
904 	priv->cur_filer_idx = rqfar;
905 
906 	/* Rest are masked rules */
907 	rqfcr = RQFCR_CMP_NOMATCH;
908 	for (i = 0; i < rqfar; i++) {
909 		priv->ftp_rqfcr[i] = rqfcr;
910 		priv->ftp_rqfpr[i] = rqfpr;
911 		gfar_write_filer(priv, i, rqfcr, rqfpr);
912 	}
913 }
914 
915 #ifdef CONFIG_PPC
916 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
917 {
918 	unsigned int pvr = mfspr(SPRN_PVR);
919 	unsigned int svr = mfspr(SPRN_SVR);
920 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
921 	unsigned int rev = svr & 0xffff;
922 
923 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
924 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
925 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
926 		priv->errata |= GFAR_ERRATA_74;
927 
928 	/* MPC8313 and MPC837x all rev */
929 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
930 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
931 		priv->errata |= GFAR_ERRATA_76;
932 
933 	/* MPC8313 Rev < 2.0 */
934 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
935 		priv->errata |= GFAR_ERRATA_12;
936 }
937 
938 static void __gfar_detect_errata_85xx(struct gfar_private *priv)
939 {
940 	unsigned int svr = mfspr(SPRN_SVR);
941 
942 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
943 		priv->errata |= GFAR_ERRATA_12;
944 	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
945 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
946 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
947 	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
948 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
949 }
950 #endif
951 
952 static void gfar_detect_errata(struct gfar_private *priv)
953 {
954 	struct device *dev = &priv->ofdev->dev;
955 
956 	/* no plans to fix */
957 	priv->errata |= GFAR_ERRATA_A002;
958 
959 #ifdef CONFIG_PPC
960 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
961 		__gfar_detect_errata_85xx(priv);
962 	else /* non-mpc85xx parts, i.e. e300 core based */
963 		__gfar_detect_errata_83xx(priv);
964 #endif
965 
966 	if (priv->errata)
967 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
968 			 priv->errata);
969 }
970 
971 static void gfar_init_addr_hash_table(struct gfar_private *priv)
972 {
973 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
974 
975 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
976 		priv->extended_hash = 1;
977 		priv->hash_width = 9;
978 
979 		priv->hash_regs[0] = &regs->igaddr0;
980 		priv->hash_regs[1] = &regs->igaddr1;
981 		priv->hash_regs[2] = &regs->igaddr2;
982 		priv->hash_regs[3] = &regs->igaddr3;
983 		priv->hash_regs[4] = &regs->igaddr4;
984 		priv->hash_regs[5] = &regs->igaddr5;
985 		priv->hash_regs[6] = &regs->igaddr6;
986 		priv->hash_regs[7] = &regs->igaddr7;
987 		priv->hash_regs[8] = &regs->gaddr0;
988 		priv->hash_regs[9] = &regs->gaddr1;
989 		priv->hash_regs[10] = &regs->gaddr2;
990 		priv->hash_regs[11] = &regs->gaddr3;
991 		priv->hash_regs[12] = &regs->gaddr4;
992 		priv->hash_regs[13] = &regs->gaddr5;
993 		priv->hash_regs[14] = &regs->gaddr6;
994 		priv->hash_regs[15] = &regs->gaddr7;
995 
996 	} else {
997 		priv->extended_hash = 0;
998 		priv->hash_width = 8;
999 
1000 		priv->hash_regs[0] = &regs->gaddr0;
1001 		priv->hash_regs[1] = &regs->gaddr1;
1002 		priv->hash_regs[2] = &regs->gaddr2;
1003 		priv->hash_regs[3] = &regs->gaddr3;
1004 		priv->hash_regs[4] = &regs->gaddr4;
1005 		priv->hash_regs[5] = &regs->gaddr5;
1006 		priv->hash_regs[6] = &regs->gaddr6;
1007 		priv->hash_regs[7] = &regs->gaddr7;
1008 	}
1009 }
1010 
1011 static int __gfar_is_rx_idle(struct gfar_private *priv)
1012 {
1013 	u32 res;
1014 
1015 	/* Normally TSEC should not hang on GRS commands, so we should
1016 	 * actually wait for the IEVENT_GRSC flag.
1017 	 */
1018 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1019 		return 0;
1020 
1021 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1022 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1023 	 * and the Rx can be safely reset.
1024 	 */
1025 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1026 	res &= 0x7f807f80;
1027 	if ((res & 0xffff) == (res >> 16))
1028 		return 1;
1029 
1030 	return 0;
1031 }
1032 
1033 /* Halt the receive and transmit queues */
1034 static void gfar_halt_nodisable(struct gfar_private *priv)
1035 {
1036 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1037 	u32 tempval;
1038 	unsigned int timeout;
1039 	int stopped;
1040 
1041 	gfar_ints_disable(priv);
1042 
1043 	if (gfar_is_dma_stopped(priv))
1044 		return;
1045 
1046 	/* Stop the DMA, and wait for it to stop */
1047 	tempval = gfar_read(&regs->dmactrl);
1048 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1049 	gfar_write(&regs->dmactrl, tempval);
1050 
1051 retry:
1052 	timeout = 1000;
1053 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1054 		cpu_relax();
1055 		timeout--;
1056 	}
1057 
1058 	if (!timeout)
1059 		stopped = gfar_is_dma_stopped(priv);
1060 
1061 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1062 	    !__gfar_is_rx_idle(priv))
1063 		goto retry;
1064 }
1065 
1066 /* Halt the receive and transmit queues */
1067 static void gfar_halt(struct gfar_private *priv)
1068 {
1069 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1070 	u32 tempval;
1071 
1072 	/* Disable the Rx/Tx hw queues */
1073 	gfar_write(&regs->rqueue, 0);
1074 	gfar_write(&regs->tqueue, 0);
1075 
1076 	mdelay(10);
1077 
1078 	gfar_halt_nodisable(priv);
1079 
1080 	/* Disable Rx/Tx DMA */
1081 	tempval = gfar_read(&regs->maccfg1);
1082 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1083 	gfar_write(&regs->maccfg1, tempval);
1084 }
1085 
1086 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1087 {
1088 	struct txbd8 *txbdp;
1089 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1090 	int i, j;
1091 
1092 	txbdp = tx_queue->tx_bd_base;
1093 
1094 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1095 		if (!tx_queue->tx_skbuff[i])
1096 			continue;
1097 
1098 		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1099 				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1100 		txbdp->lstatus = 0;
1101 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1102 		     j++) {
1103 			txbdp++;
1104 			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1105 				       be16_to_cpu(txbdp->length),
1106 				       DMA_TO_DEVICE);
1107 		}
1108 		txbdp++;
1109 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1110 		tx_queue->tx_skbuff[i] = NULL;
1111 	}
1112 	kfree(tx_queue->tx_skbuff);
1113 	tx_queue->tx_skbuff = NULL;
1114 }
1115 
1116 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1117 {
1118 	int i;
1119 
1120 	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1121 
1122 	dev_kfree_skb(rx_queue->skb);
1123 
1124 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1125 		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1126 
1127 		rxbdp->lstatus = 0;
1128 		rxbdp->bufPtr = 0;
1129 		rxbdp++;
1130 
1131 		if (!rxb->page)
1132 			continue;
1133 
1134 		dma_unmap_page(rx_queue->dev, rxb->dma,
1135 			       PAGE_SIZE, DMA_FROM_DEVICE);
1136 		__free_page(rxb->page);
1137 
1138 		rxb->page = NULL;
1139 	}
1140 
1141 	kfree(rx_queue->rx_buff);
1142 	rx_queue->rx_buff = NULL;
1143 }
1144 
1145 /* If there are any tx skbs or rx skbs still around, free them.
1146  * Then free tx_skbuff and rx_skbuff
1147  */
1148 static void free_skb_resources(struct gfar_private *priv)
1149 {
1150 	struct gfar_priv_tx_q *tx_queue = NULL;
1151 	struct gfar_priv_rx_q *rx_queue = NULL;
1152 	int i;
1153 
1154 	/* Go through all the buffer descriptors and free their data buffers */
1155 	for (i = 0; i < priv->num_tx_queues; i++) {
1156 		struct netdev_queue *txq;
1157 
1158 		tx_queue = priv->tx_queue[i];
1159 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1160 		if (tx_queue->tx_skbuff)
1161 			free_skb_tx_queue(tx_queue);
1162 		netdev_tx_reset_queue(txq);
1163 	}
1164 
1165 	for (i = 0; i < priv->num_rx_queues; i++) {
1166 		rx_queue = priv->rx_queue[i];
1167 		if (rx_queue->rx_buff)
1168 			free_skb_rx_queue(rx_queue);
1169 	}
1170 
1171 	dma_free_coherent(priv->dev,
1172 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1173 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1174 			  priv->tx_queue[0]->tx_bd_base,
1175 			  priv->tx_queue[0]->tx_bd_dma_base);
1176 }
1177 
1178 void stop_gfar(struct net_device *dev)
1179 {
1180 	struct gfar_private *priv = netdev_priv(dev);
1181 
1182 	netif_tx_stop_all_queues(dev);
1183 
1184 	smp_mb__before_atomic();
1185 	set_bit(GFAR_DOWN, &priv->state);
1186 	smp_mb__after_atomic();
1187 
1188 	disable_napi(priv);
1189 
1190 	/* disable ints and gracefully shut down Rx/Tx DMA */
1191 	gfar_halt(priv);
1192 
1193 	phy_stop(dev->phydev);
1194 
1195 	free_skb_resources(priv);
1196 }
1197 
1198 static void gfar_start(struct gfar_private *priv)
1199 {
1200 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1201 	u32 tempval;
1202 	int i = 0;
1203 
1204 	/* Enable Rx/Tx hw queues */
1205 	gfar_write(&regs->rqueue, priv->rqueue);
1206 	gfar_write(&regs->tqueue, priv->tqueue);
1207 
1208 	/* Initialize DMACTRL to have WWR and WOP */
1209 	tempval = gfar_read(&regs->dmactrl);
1210 	tempval |= DMACTRL_INIT_SETTINGS;
1211 	gfar_write(&regs->dmactrl, tempval);
1212 
1213 	/* Make sure we aren't stopped */
1214 	tempval = gfar_read(&regs->dmactrl);
1215 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1216 	gfar_write(&regs->dmactrl, tempval);
1217 
1218 	for (i = 0; i < priv->num_grps; i++) {
1219 		regs = priv->gfargrp[i].regs;
1220 		/* Clear THLT/RHLT, so that the DMA starts polling now */
1221 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1222 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1223 	}
1224 
1225 	/* Enable Rx/Tx DMA */
1226 	tempval = gfar_read(&regs->maccfg1);
1227 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1228 	gfar_write(&regs->maccfg1, tempval);
1229 
1230 	gfar_ints_enable(priv);
1231 
1232 	netif_trans_update(priv->ndev); /* prevent tx timeout */
1233 }
1234 
1235 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1236 {
1237 	struct page *page;
1238 	dma_addr_t addr;
1239 
1240 	page = dev_alloc_page();
1241 	if (unlikely(!page))
1242 		return false;
1243 
1244 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1245 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1246 		__free_page(page);
1247 
1248 		return false;
1249 	}
1250 
1251 	rxb->dma = addr;
1252 	rxb->page = page;
1253 	rxb->page_offset = 0;
1254 
1255 	return true;
1256 }
1257 
1258 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1259 {
1260 	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1261 	struct gfar_extra_stats *estats = &priv->extra_stats;
1262 
1263 	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1264 	atomic64_inc(&estats->rx_alloc_err);
1265 }
1266 
1267 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1268 				int alloc_cnt)
1269 {
1270 	struct rxbd8 *bdp;
1271 	struct gfar_rx_buff *rxb;
1272 	int i;
1273 
1274 	i = rx_queue->next_to_use;
1275 	bdp = &rx_queue->rx_bd_base[i];
1276 	rxb = &rx_queue->rx_buff[i];
1277 
1278 	while (alloc_cnt--) {
1279 		/* try to reuse the page */
1280 		if (unlikely(!rxb->page)) {
1281 			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1282 				gfar_rx_alloc_err(rx_queue);
1283 				break;
1284 			}
1285 		}
1286 
1287 		/* Setup the new RxBD */
1288 		gfar_init_rxbdp(rx_queue, bdp,
1289 				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1290 
1291 		/* Update to the next pointer */
1292 		bdp++;
1293 		rxb++;
1294 
1295 		if (unlikely(++i == rx_queue->rx_ring_size)) {
1296 			i = 0;
1297 			bdp = rx_queue->rx_bd_base;
1298 			rxb = rx_queue->rx_buff;
1299 		}
1300 	}
1301 
1302 	rx_queue->next_to_use = i;
1303 	rx_queue->next_to_alloc = i;
1304 }
1305 
1306 static void gfar_init_bds(struct net_device *ndev)
1307 {
1308 	struct gfar_private *priv = netdev_priv(ndev);
1309 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1310 	struct gfar_priv_tx_q *tx_queue = NULL;
1311 	struct gfar_priv_rx_q *rx_queue = NULL;
1312 	struct txbd8 *txbdp;
1313 	u32 __iomem *rfbptr;
1314 	int i, j;
1315 
1316 	for (i = 0; i < priv->num_tx_queues; i++) {
1317 		tx_queue = priv->tx_queue[i];
1318 		/* Initialize some variables in our dev structure */
1319 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1320 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
1321 		tx_queue->cur_tx = tx_queue->tx_bd_base;
1322 		tx_queue->skb_curtx = 0;
1323 		tx_queue->skb_dirtytx = 0;
1324 
1325 		/* Initialize Transmit Descriptor Ring */
1326 		txbdp = tx_queue->tx_bd_base;
1327 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
1328 			txbdp->lstatus = 0;
1329 			txbdp->bufPtr = 0;
1330 			txbdp++;
1331 		}
1332 
1333 		/* Set the last descriptor in the ring to indicate wrap */
1334 		txbdp--;
1335 		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1336 					    TXBD_WRAP);
1337 	}
1338 
1339 	rfbptr = &regs->rfbptr0;
1340 	for (i = 0; i < priv->num_rx_queues; i++) {
1341 		rx_queue = priv->rx_queue[i];
1342 
1343 		rx_queue->next_to_clean = 0;
1344 		rx_queue->next_to_use = 0;
1345 		rx_queue->next_to_alloc = 0;
1346 
1347 		/* make sure next_to_clean != next_to_use after this
1348 		 * by leaving at least 1 unused descriptor
1349 		 */
1350 		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1351 
1352 		rx_queue->rfbptr = rfbptr;
1353 		rfbptr += 2;
1354 	}
1355 }
1356 
1357 static int gfar_alloc_skb_resources(struct net_device *ndev)
1358 {
1359 	void *vaddr;
1360 	dma_addr_t addr;
1361 	int i, j;
1362 	struct gfar_private *priv = netdev_priv(ndev);
1363 	struct device *dev = priv->dev;
1364 	struct gfar_priv_tx_q *tx_queue = NULL;
1365 	struct gfar_priv_rx_q *rx_queue = NULL;
1366 
1367 	priv->total_tx_ring_size = 0;
1368 	for (i = 0; i < priv->num_tx_queues; i++)
1369 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1370 
1371 	priv->total_rx_ring_size = 0;
1372 	for (i = 0; i < priv->num_rx_queues; i++)
1373 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1374 
1375 	/* Allocate memory for the buffer descriptors */
1376 	vaddr = dma_alloc_coherent(dev,
1377 				   (priv->total_tx_ring_size *
1378 				    sizeof(struct txbd8)) +
1379 				   (priv->total_rx_ring_size *
1380 				    sizeof(struct rxbd8)),
1381 				   &addr, GFP_KERNEL);
1382 	if (!vaddr)
1383 		return -ENOMEM;
1384 
1385 	for (i = 0; i < priv->num_tx_queues; i++) {
1386 		tx_queue = priv->tx_queue[i];
1387 		tx_queue->tx_bd_base = vaddr;
1388 		tx_queue->tx_bd_dma_base = addr;
1389 		tx_queue->dev = ndev;
1390 		/* enet DMA only understands physical addresses */
1391 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1392 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1393 	}
1394 
1395 	/* Start the rx descriptor ring where the tx ring leaves off */
1396 	for (i = 0; i < priv->num_rx_queues; i++) {
1397 		rx_queue = priv->rx_queue[i];
1398 		rx_queue->rx_bd_base = vaddr;
1399 		rx_queue->rx_bd_dma_base = addr;
1400 		rx_queue->ndev = ndev;
1401 		rx_queue->dev = dev;
1402 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1403 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1404 	}
1405 
1406 	/* Setup the skbuff rings */
1407 	for (i = 0; i < priv->num_tx_queues; i++) {
1408 		tx_queue = priv->tx_queue[i];
1409 		tx_queue->tx_skbuff =
1410 			kmalloc_array(tx_queue->tx_ring_size,
1411 				      sizeof(*tx_queue->tx_skbuff),
1412 				      GFP_KERNEL);
1413 		if (!tx_queue->tx_skbuff)
1414 			goto cleanup;
1415 
1416 		for (j = 0; j < tx_queue->tx_ring_size; j++)
1417 			tx_queue->tx_skbuff[j] = NULL;
1418 	}
1419 
1420 	for (i = 0; i < priv->num_rx_queues; i++) {
1421 		rx_queue = priv->rx_queue[i];
1422 		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1423 					    sizeof(*rx_queue->rx_buff),
1424 					    GFP_KERNEL);
1425 		if (!rx_queue->rx_buff)
1426 			goto cleanup;
1427 	}
1428 
1429 	gfar_init_bds(ndev);
1430 
1431 	return 0;
1432 
1433 cleanup:
1434 	free_skb_resources(priv);
1435 	return -ENOMEM;
1436 }
1437 
1438 /* Bring the controller up and running */
1439 int startup_gfar(struct net_device *ndev)
1440 {
1441 	struct gfar_private *priv = netdev_priv(ndev);
1442 	int err;
1443 
1444 	gfar_mac_reset(priv);
1445 
1446 	err = gfar_alloc_skb_resources(ndev);
1447 	if (err)
1448 		return err;
1449 
1450 	gfar_init_tx_rx_base(priv);
1451 
1452 	smp_mb__before_atomic();
1453 	clear_bit(GFAR_DOWN, &priv->state);
1454 	smp_mb__after_atomic();
1455 
1456 	/* Start Rx/Tx DMA and enable the interrupts */
1457 	gfar_start(priv);
1458 
1459 	/* force link state update after mac reset */
1460 	priv->oldlink = 0;
1461 	priv->oldspeed = 0;
1462 	priv->oldduplex = -1;
1463 
1464 	phy_start(ndev->phydev);
1465 
1466 	enable_napi(priv);
1467 
1468 	netif_tx_wake_all_queues(ndev);
1469 
1470 	return 0;
1471 }
1472 
1473 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1474 {
1475 	struct net_device *ndev = priv->ndev;
1476 	struct phy_device *phydev = ndev->phydev;
1477 	u32 val = 0;
1478 
1479 	if (!phydev->duplex)
1480 		return val;
1481 
1482 	if (!priv->pause_aneg_en) {
1483 		if (priv->tx_pause_en)
1484 			val |= MACCFG1_TX_FLOW;
1485 		if (priv->rx_pause_en)
1486 			val |= MACCFG1_RX_FLOW;
1487 	} else {
1488 		u16 lcl_adv, rmt_adv;
1489 		u8 flowctrl;
1490 		/* get link partner capabilities */
1491 		rmt_adv = 0;
1492 		if (phydev->pause)
1493 			rmt_adv = LPA_PAUSE_CAP;
1494 		if (phydev->asym_pause)
1495 			rmt_adv |= LPA_PAUSE_ASYM;
1496 
1497 		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1498 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1499 		if (flowctrl & FLOW_CTRL_TX)
1500 			val |= MACCFG1_TX_FLOW;
1501 		if (flowctrl & FLOW_CTRL_RX)
1502 			val |= MACCFG1_RX_FLOW;
1503 	}
1504 
1505 	return val;
1506 }
1507 
1508 static noinline void gfar_update_link_state(struct gfar_private *priv)
1509 {
1510 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1511 	struct net_device *ndev = priv->ndev;
1512 	struct phy_device *phydev = ndev->phydev;
1513 	struct gfar_priv_rx_q *rx_queue = NULL;
1514 	int i;
1515 
1516 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1517 		return;
1518 
1519 	if (phydev->link) {
1520 		u32 tempval1 = gfar_read(&regs->maccfg1);
1521 		u32 tempval = gfar_read(&regs->maccfg2);
1522 		u32 ecntrl = gfar_read(&regs->ecntrl);
1523 		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1524 
1525 		if (phydev->duplex != priv->oldduplex) {
1526 			if (!(phydev->duplex))
1527 				tempval &= ~(MACCFG2_FULL_DUPLEX);
1528 			else
1529 				tempval |= MACCFG2_FULL_DUPLEX;
1530 
1531 			priv->oldduplex = phydev->duplex;
1532 		}
1533 
1534 		if (phydev->speed != priv->oldspeed) {
1535 			switch (phydev->speed) {
1536 			case 1000:
1537 				tempval =
1538 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1539 
1540 				ecntrl &= ~(ECNTRL_R100);
1541 				break;
1542 			case 100:
1543 			case 10:
1544 				tempval =
1545 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1546 
1547 				/* Reduced mode distinguishes
1548 				 * between 10 and 100
1549 				 */
1550 				if (phydev->speed == SPEED_100)
1551 					ecntrl |= ECNTRL_R100;
1552 				else
1553 					ecntrl &= ~(ECNTRL_R100);
1554 				break;
1555 			default:
1556 				netif_warn(priv, link, priv->ndev,
1557 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
1558 					   phydev->speed);
1559 				break;
1560 			}
1561 
1562 			priv->oldspeed = phydev->speed;
1563 		}
1564 
1565 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1566 		tempval1 |= gfar_get_flowctrl_cfg(priv);
1567 
1568 		/* Turn last free buffer recording on */
1569 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1570 			for (i = 0; i < priv->num_rx_queues; i++) {
1571 				u32 bdp_dma;
1572 
1573 				rx_queue = priv->rx_queue[i];
1574 				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1575 				gfar_write(rx_queue->rfbptr, bdp_dma);
1576 			}
1577 
1578 			priv->tx_actual_en = 1;
1579 		}
1580 
1581 		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1582 			priv->tx_actual_en = 0;
1583 
1584 		gfar_write(&regs->maccfg1, tempval1);
1585 		gfar_write(&regs->maccfg2, tempval);
1586 		gfar_write(&regs->ecntrl, ecntrl);
1587 
1588 		if (!priv->oldlink)
1589 			priv->oldlink = 1;
1590 
1591 	} else if (priv->oldlink) {
1592 		priv->oldlink = 0;
1593 		priv->oldspeed = 0;
1594 		priv->oldduplex = -1;
1595 	}
1596 
1597 	if (netif_msg_link(priv))
1598 		phy_print_status(phydev);
1599 }
1600 
1601 /* Called every time the controller might need to be made
1602  * aware of new link state.  The PHY code conveys this
1603  * information through variables in the phydev structure, and this
1604  * function converts those variables into the appropriate
1605  * register values, and can bring down the device if needed.
1606  */
1607 static void adjust_link(struct net_device *dev)
1608 {
1609 	struct gfar_private *priv = netdev_priv(dev);
1610 	struct phy_device *phydev = dev->phydev;
1611 
1612 	if (unlikely(phydev->link != priv->oldlink ||
1613 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
1614 				       phydev->speed != priv->oldspeed))))
1615 		gfar_update_link_state(priv);
1616 }
1617 
1618 /* Initialize TBI PHY interface for communicating with the
1619  * SERDES lynx PHY on the chip.  We communicate with this PHY
1620  * through the MDIO bus on each controller, treating it as a
1621  * "normal" PHY at the address found in the TBIPA register.  We assume
1622  * that the TBIPA register is valid.  Either the MDIO bus code will set
1623  * it to a value that doesn't conflict with other PHYs on the bus, or the
1624  * value doesn't matter, as there are no other PHYs on the bus.
1625  */
1626 static void gfar_configure_serdes(struct net_device *dev)
1627 {
1628 	struct gfar_private *priv = netdev_priv(dev);
1629 	struct phy_device *tbiphy;
1630 
1631 	if (!priv->tbi_node) {
1632 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1633 				    "device tree specify a tbi-handle\n");
1634 		return;
1635 	}
1636 
1637 	tbiphy = of_phy_find_device(priv->tbi_node);
1638 	if (!tbiphy) {
1639 		dev_err(&dev->dev, "error: Could not get TBI device\n");
1640 		return;
1641 	}
1642 
1643 	/* If the link is already up, we must already be ok, and don't need to
1644 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1645 	 * everything for us?  Resetting it takes the link down and requires
1646 	 * several seconds for it to come back.
1647 	 */
1648 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1649 		put_device(&tbiphy->mdio.dev);
1650 		return;
1651 	}
1652 
1653 	/* Single clk mode, mii mode off(for serdes communication) */
1654 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1655 
1656 	phy_write(tbiphy, MII_ADVERTISE,
1657 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1658 		  ADVERTISE_1000XPSE_ASYM);
1659 
1660 	phy_write(tbiphy, MII_BMCR,
1661 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1662 		  BMCR_SPEED1000);
1663 
1664 	put_device(&tbiphy->mdio.dev);
1665 }
1666 
1667 /* Initializes driver's PHY state, and attaches to the PHY.
1668  * Returns 0 on success.
1669  */
1670 static int init_phy(struct net_device *dev)
1671 {
1672 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1673 	struct gfar_private *priv = netdev_priv(dev);
1674 	phy_interface_t interface = priv->interface;
1675 	struct phy_device *phydev;
1676 	struct ethtool_eee edata;
1677 
1678 	linkmode_set_bit_array(phy_10_100_features_array,
1679 			       ARRAY_SIZE(phy_10_100_features_array),
1680 			       mask);
1681 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1682 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1683 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1684 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1685 
1686 	priv->oldlink = 0;
1687 	priv->oldspeed = 0;
1688 	priv->oldduplex = -1;
1689 
1690 	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1691 				interface);
1692 	if (!phydev) {
1693 		dev_err(&dev->dev, "could not attach to PHY\n");
1694 		return -ENODEV;
1695 	}
1696 
1697 	if (interface == PHY_INTERFACE_MODE_SGMII)
1698 		gfar_configure_serdes(dev);
1699 
1700 	/* Remove any features not supported by the controller */
1701 	linkmode_and(phydev->supported, phydev->supported, mask);
1702 	linkmode_copy(phydev->advertising, phydev->supported);
1703 
1704 	/* Add support for flow control */
1705 	phy_support_asym_pause(phydev);
1706 
1707 	/* disable EEE autoneg, EEE not supported by eTSEC */
1708 	memset(&edata, 0, sizeof(struct ethtool_eee));
1709 	phy_ethtool_set_eee(phydev, &edata);
1710 
1711 	return 0;
1712 }
1713 
1714 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1715 {
1716 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1717 
1718 	memset(fcb, 0, GMAC_FCB_LEN);
1719 
1720 	return fcb;
1721 }
1722 
1723 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1724 				    int fcb_length)
1725 {
1726 	/* If we're here, it's an IP packet with a TCP or UDP
1727 	 * payload.  We set it to checksum, using a pseudo-header
1728 	 * we provide
1729 	 */
1730 	u8 flags = TXFCB_DEFAULT;
1731 
1732 	/* Tell the controller what the protocol is,
1733 	 * and provide the already calculated phcs
1734 	 */
1735 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1736 		flags |= TXFCB_UDP;
1737 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1738 	} else
1739 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1740 
1741 	/* l3os is the distance between the start of the
1742 	 * frame (skb->data) and the start of the IP hdr.
1743 	 * l4os is the distance between the start of the
1744 	 * l3 hdr and the l4 hdr
1745 	 */
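	/* Rough example (an assumption for illustration, not taken from the
	 * manuals): for an untagged IPv4/TCP frame with only the 8-byte FCB
	 * pushed in front (fcb_length == GMAC_FCB_LEN), the IP header starts
	 * 14 bytes into the original frame, so l3os works out to 14 and l4os
	 * to the IP header length (20 without options).
	 */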
1746 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1747 	fcb->l4os = skb_network_header_len(skb);
1748 
1749 	fcb->flags = flags;
1750 }
1751 
1752 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1753 {
1754 	fcb->flags |= TXFCB_VLN;
1755 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1756 }
1757 
1758 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1759 				      struct txbd8 *base, int ring_size)
1760 {
1761 	struct txbd8 *new_bd = bdp + stride;
1762 
1763 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1764 }
1765 
1766 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1767 				      int ring_size)
1768 {
1769 	return skip_txbd(bdp, 1, base, ring_size);
1770 }
1771 
1772 /* eTSEC12: csum generation not supported for some fcb offsets */
1773 static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1774 				       unsigned long fcb_addr)
1775 {
1776 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1777 	       (fcb_addr % 0x20) > 0x18);
1778 }
1779 
1780 /* eTSEC76: csum generation for frames larger than 2500 may
1781  * cause excess delays before start of transmission
1782  */
1783 static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1784 				       unsigned int len)
1785 {
1786 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1787 	       (len > 2500));
1788 }
1789 
1790 /* This is called by the kernel when a frame is ready for transmission.
1791  * It is pointed to by the dev->hard_start_xmit function pointer
1792  */
1793 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1794 {
1795 	struct gfar_private *priv = netdev_priv(dev);
1796 	struct gfar_priv_tx_q *tx_queue = NULL;
1797 	struct netdev_queue *txq;
1798 	struct gfar __iomem *regs = NULL;
1799 	struct txfcb *fcb = NULL;
1800 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1801 	u32 lstatus;
1802 	skb_frag_t *frag;
1803 	int i, rq = 0;
1804 	int do_tstamp, do_csum, do_vlan;
1805 	u32 bufaddr;
1806 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1807 
1808 	rq = skb->queue_mapping;
1809 	tx_queue = priv->tx_queue[rq];
1810 	txq = netdev_get_tx_queue(dev, rq);
1811 	base = tx_queue->tx_bd_base;
1812 	regs = tx_queue->grp->regs;
1813 
1814 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1815 	do_vlan = skb_vlan_tag_present(skb);
1816 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1817 		    priv->hwts_tx_en;
1818 
1819 	if (do_csum || do_vlan)
1820 		fcb_len = GMAC_FCB_LEN;
1821 
1822 	/* check if time stamp should be generated */
1823 	if (unlikely(do_tstamp))
1824 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1825 
1826 	/* make space for additional header when fcb is needed */
1827 	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
1828 		struct sk_buff *skb_new;
1829 
1830 		skb_new = skb_realloc_headroom(skb, fcb_len);
1831 		if (!skb_new) {
1832 			dev->stats.tx_errors++;
1833 			dev_kfree_skb_any(skb);
1834 			return NETDEV_TX_OK;
1835 		}
1836 
1837 		if (skb->sk)
1838 			skb_set_owner_w(skb_new, skb->sk);
1839 		dev_consume_skb_any(skb);
1840 		skb = skb_new;
1841 	}
1842 
1843 	/* total number of fragments in the SKB */
1844 	nr_frags = skb_shinfo(skb)->nr_frags;
1845 
1846 	/* calculate the required number of TxBDs for this skb */
1847 	if (unlikely(do_tstamp))
1848 		nr_txbds = nr_frags + 2;
1849 	else
1850 		nr_txbds = nr_frags + 1;
1851 
1852 	/* check if there is space to queue this packet */
1853 	if (nr_txbds > tx_queue->num_txbdfree) {
1854 		/* no space, stop the queue */
1855 		netif_tx_stop_queue(txq);
1856 		dev->stats.tx_fifo_errors++;
1857 		return NETDEV_TX_BUSY;
1858 	}
1859 
1860 	/* Update transmit stats */
1861 	bytes_sent = skb->len;
1862 	tx_queue->stats.tx_bytes += bytes_sent;
1863 	/* keep Tx bytes on wire for BQL accounting */
1864 	GFAR_CB(skb)->bytes_sent = bytes_sent;
1865 	tx_queue->stats.tx_packets++;
1866 
1867 	txbdp = txbdp_start = tx_queue->cur_tx;
1868 	lstatus = be32_to_cpu(txbdp->lstatus);
1869 
1870 	/* Add TxPAL between FCB and frame if required */
1871 	if (unlikely(do_tstamp)) {
1872 		skb_push(skb, GMAC_TXPAL_LEN);
1873 		memset(skb->data, 0, GMAC_TXPAL_LEN);
1874 	}
1875 
1876 	/* Add TxFCB if required */
1877 	if (fcb_len) {
1878 		fcb = gfar_add_fcb(skb);
1879 		lstatus |= BD_LFLAG(TXBD_TOE);
1880 	}
1881 
1882 	/* Set up checksumming */
1883 	if (do_csum) {
1884 		gfar_tx_checksum(skb, fcb, fcb_len);
1885 
1886 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1887 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
1888 			__skb_pull(skb, GMAC_FCB_LEN);
1889 			skb_checksum_help(skb);
1890 			if (do_vlan || do_tstamp) {
1891 				/* put back a new fcb for vlan/tstamp TOE */
1892 				fcb = gfar_add_fcb(skb);
1893 			} else {
1894 				/* Tx TOE not used */
1895 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
1896 				fcb = NULL;
1897 			}
1898 		}
1899 	}
1900 
1901 	if (do_vlan)
1902 		gfar_tx_vlan(skb, fcb);
1903 
1904 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1905 				 DMA_TO_DEVICE);
1906 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1907 		goto dma_map_err;
1908 
1909 	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1910 
1911 	/* Time stamp insertion requires one additional TxBD */
1912 	if (unlikely(do_tstamp))
1913 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1914 						 tx_queue->tx_ring_size);
1915 
1916 	if (likely(!nr_frags)) {
1917 		if (likely(!do_tstamp))
1918 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1919 	} else {
1920 		u32 lstatus_start = lstatus;
1921 
1922 		/* Place the fragment addresses and lengths into the TxBDs */
1923 		frag = &skb_shinfo(skb)->frags[0];
1924 		for (i = 0; i < nr_frags; i++, frag++) {
1925 			unsigned int size;
1926 
1927 			/* Point at the next BD, wrapping as needed */
1928 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1929 
1930 			size = skb_frag_size(frag);
1931 
1932 			lstatus = be32_to_cpu(txbdp->lstatus) | size |
1933 				  BD_LFLAG(TXBD_READY);
1934 
1935 			/* Handle the last BD specially */
1936 			if (i == nr_frags - 1)
1937 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1938 
1939 			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1940 						   size, DMA_TO_DEVICE);
1941 			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1942 				goto dma_map_err;
1943 
1944 			/* set the TxBD length and buffer pointer */
1945 			txbdp->bufPtr = cpu_to_be32(bufaddr);
1946 			txbdp->lstatus = cpu_to_be32(lstatus);
1947 		}
1948 
1949 		lstatus = lstatus_start;
1950 	}
1951 
1952 	/* If time stamping is requested one additional TxBD must be set up. The
1953 	 * first TxBD points to the FCB and must have a data length of
1954 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1955 	 * the full frame length.
1956 	 */
1957 	if (unlikely(do_tstamp)) {
1958 		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1959 
1960 		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1961 		bufaddr += fcb_len;
1962 
1963 		lstatus_ts |= BD_LFLAG(TXBD_READY) |
1964 			      (skb_headlen(skb) - fcb_len);
1965 		if (!nr_frags)
1966 			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1967 
1968 		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1969 		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1970 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1971 
1972 		/* Setup tx hardware time stamping */
1973 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1974 		fcb->ptp = 1;
1975 	} else {
1976 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1977 	}
1978 
1979 	netdev_tx_sent_queue(txq, bytes_sent);
1980 
1981 	gfar_wmb();
1982 
1983 	txbdp_start->lstatus = cpu_to_be32(lstatus);
1984 
1985 	gfar_wmb(); /* force lstatus write before tx_skbuff */
1986 
1987 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1988 
1989 	/* Update the current skb pointer to the next entry we will use
1990 	 * (wrapping if necessary)
1991 	 */
1992 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1993 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1994 
1995 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1996 
1997 	/* We can work in parallel with gfar_clean_tx_ring(), except
1998 	 * when modifying num_txbdfree. Note that we didn't grab the lock
1999 	 * when we were reading the num_txbdfree and checking for available
2000 	 * space, that's because outside of this function it can only grow.
2001 	 */
2002 	spin_lock_bh(&tx_queue->txlock);
2003 	/* reduce TxBD free count */
2004 	tx_queue->num_txbdfree -= nr_txbds;
2005 	spin_unlock_bh(&tx_queue->txlock);
2006 
2007 	/* If the next BD still needs to be cleaned up, then the BDs
2008 	 * are full.  We need to tell the kernel to stop sending us packets.
2009 	 */
2010 	if (!tx_queue->num_txbdfree) {
2011 		netif_tx_stop_queue(txq);
2012 
2013 		dev->stats.tx_fifo_errors++;
2014 	}
2015 
2016 	/* Tell the DMA engine to start transmitting */
2017 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2018 
2019 	return NETDEV_TX_OK;
2020 
2021 dma_map_err:
2022 	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2023 	if (do_tstamp)
2024 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2025 	for (i = 0; i < nr_frags; i++) {
2026 		lstatus = be32_to_cpu(txbdp->lstatus);
2027 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
2028 			break;
2029 
2030 		lstatus &= ~BD_LFLAG(TXBD_READY);
2031 		txbdp->lstatus = cpu_to_be32(lstatus);
2032 		bufaddr = be32_to_cpu(txbdp->bufPtr);
2033 		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2034 			       DMA_TO_DEVICE);
2035 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2036 	}
2037 	gfar_wmb();
2038 	dev_kfree_skb_any(skb);
2039 	return NETDEV_TX_OK;
2040 }
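
/* Sketch of the BD usage in gfar_start_xmit() (illustrative only):
 *
 *   normal frame:     one BD for the linear part (including the FCB,
 *                     if any) + one BD per page fragment
 *   Tx timestamping:  one BD for the FCB alone + one BD for the rest
 *                     of the linear data + one BD per page fragment
 *
 * hence nr_txbds = nr_frags + 1, or nr_frags + 2 when hardware Tx
 * timestamping is requested.
 */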
2041 
2042 /* Changes the mac address if the controller is not running. */
2043 static int gfar_set_mac_address(struct net_device *dev)
2044 {
2045 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2046 
2047 	return 0;
2048 }
2049 
2050 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2051 {
2052 	struct gfar_private *priv = netdev_priv(dev);
2053 
2054 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2055 		cpu_relax();
2056 
2057 	if (dev->flags & IFF_UP)
2058 		stop_gfar(dev);
2059 
2060 	dev->mtu = new_mtu;
2061 
2062 	if (dev->flags & IFF_UP)
2063 		startup_gfar(dev);
2064 
2065 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2066 
2067 	return 0;
2068 }
2069 
2070 static void reset_gfar(struct net_device *ndev)
2071 {
2072 	struct gfar_private *priv = netdev_priv(ndev);
2073 
2074 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2075 		cpu_relax();
2076 
2077 	stop_gfar(ndev);
2078 	startup_gfar(ndev);
2079 
2080 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2081 }
2082 
2083 /* gfar_reset_task gets scheduled when a packet has not been
2084  * transmitted after a set amount of time.
2085  * For now, assume that clearing out all the structures, and
2086  * starting over will fix the problem.
2087  */
2088 static void gfar_reset_task(struct work_struct *work)
2089 {
2090 	struct gfar_private *priv = container_of(work, struct gfar_private,
2091 						 reset_task);
2092 	reset_gfar(priv->ndev);
2093 }
2094 
2095 static void gfar_timeout(struct net_device *dev)
2096 {
2097 	struct gfar_private *priv = netdev_priv(dev);
2098 
2099 	dev->stats.tx_errors++;
2100 	schedule_work(&priv->reset_task);
2101 }
2102 
2103 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2104 {
2105 	struct hwtstamp_config config;
2106 	struct gfar_private *priv = netdev_priv(netdev);
2107 
2108 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2109 		return -EFAULT;
2110 
2111 	/* reserved for future extensions */
2112 	if (config.flags)
2113 		return -EINVAL;
2114 
2115 	switch (config.tx_type) {
2116 	case HWTSTAMP_TX_OFF:
2117 		priv->hwts_tx_en = 0;
2118 		break;
2119 	case HWTSTAMP_TX_ON:
2120 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2121 			return -ERANGE;
2122 		priv->hwts_tx_en = 1;
2123 		break;
2124 	default:
2125 		return -ERANGE;
2126 	}
2127 
2128 	switch (config.rx_filter) {
2129 	case HWTSTAMP_FILTER_NONE:
2130 		if (priv->hwts_rx_en) {
2131 			priv->hwts_rx_en = 0;
2132 			reset_gfar(netdev);
2133 		}
2134 		break;
2135 	default:
2136 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2137 			return -ERANGE;
2138 		if (!priv->hwts_rx_en) {
2139 			priv->hwts_rx_en = 1;
2140 			reset_gfar(netdev);
2141 		}
2142 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2143 		break;
2144 	}
2145 
2146 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2147 		-EFAULT : 0;
2148 }
2149 
2150 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2151 {
2152 	struct hwtstamp_config config;
2153 	struct gfar_private *priv = netdev_priv(netdev);
2154 
2155 	config.flags = 0;
2156 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2157 	config.rx_filter = (priv->hwts_rx_en ?
2158 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2159 
2160 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2161 		-EFAULT : 0;
2162 }
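
/* The two handlers above are reached from userspace via the standard
 * SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls.  Minimal usage sketch
 * (illustrative only, not part of the driver; the interface name and
 * the socket fd are assumptions):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any requested rx filter is widened to HWTSTAMP_FILTER_ALL,
 * since the hardware timestamps every received frame.
 */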
2163 
2164 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2165 {
2166 	struct phy_device *phydev = dev->phydev;
2167 
2168 	if (!netif_running(dev))
2169 		return -EINVAL;
2170 
2171 	if (cmd == SIOCSHWTSTAMP)
2172 		return gfar_hwtstamp_set(dev, rq);
2173 	if (cmd == SIOCGHWTSTAMP)
2174 		return gfar_hwtstamp_get(dev, rq);
2175 
2176 	if (!phydev)
2177 		return -ENODEV;
2178 
2179 	return phy_mii_ioctl(phydev, rq, cmd);
2180 }
2181 
2182 /* Interrupt Handler for Transmit complete */
2183 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2184 {
2185 	struct net_device *dev = tx_queue->dev;
2186 	struct netdev_queue *txq;
2187 	struct gfar_private *priv = netdev_priv(dev);
2188 	struct txbd8 *bdp, *next = NULL;
2189 	struct txbd8 *lbdp = NULL;
2190 	struct txbd8 *base = tx_queue->tx_bd_base;
2191 	struct sk_buff *skb;
2192 	int skb_dirtytx;
2193 	int tx_ring_size = tx_queue->tx_ring_size;
2194 	int frags = 0, nr_txbds = 0;
2195 	int i;
2196 	int howmany = 0;
2197 	int tqi = tx_queue->qindex;
2198 	unsigned int bytes_sent = 0;
2199 	u32 lstatus;
2200 	size_t buflen;
2201 
2202 	txq = netdev_get_tx_queue(dev, tqi);
2203 	bdp = tx_queue->dirty_tx;
2204 	skb_dirtytx = tx_queue->skb_dirtytx;
2205 
2206 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2207 
2208 		frags = skb_shinfo(skb)->nr_frags;
2209 
2210 		/* When time stamping, one additional TxBD must be freed.
2211 		 * Also, we need to dma_unmap_single() the TxPAL.
2212 		 */
2213 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2214 			nr_txbds = frags + 2;
2215 		else
2216 			nr_txbds = frags + 1;
2217 
2218 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2219 
2220 		lstatus = be32_to_cpu(lbdp->lstatus);
2221 
2222 		/* Only clean completed frames */
2223 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2224 		    (lstatus & BD_LENGTH_MASK))
2225 			break;
2226 
2227 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2228 			next = next_txbd(bdp, base, tx_ring_size);
2229 			buflen = be16_to_cpu(next->length) +
2230 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2231 		} else
2232 			buflen = be16_to_cpu(bdp->length);
2233 
2234 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2235 				 buflen, DMA_TO_DEVICE);
2236 
2237 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2238 			struct skb_shared_hwtstamps shhwtstamps;
2239 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2240 					  ~0x7UL);
2241 
2242 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2243 			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2244 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2245 			skb_tstamp_tx(skb, &shhwtstamps);
2246 			gfar_clear_txbd_status(bdp);
2247 			bdp = next;
2248 		}
2249 
2250 		gfar_clear_txbd_status(bdp);
2251 		bdp = next_txbd(bdp, base, tx_ring_size);
2252 
2253 		for (i = 0; i < frags; i++) {
2254 			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2255 				       be16_to_cpu(bdp->length),
2256 				       DMA_TO_DEVICE);
2257 			gfar_clear_txbd_status(bdp);
2258 			bdp = next_txbd(bdp, base, tx_ring_size);
2259 		}
2260 
2261 		bytes_sent += GFAR_CB(skb)->bytes_sent;
2262 
2263 		dev_kfree_skb_any(skb);
2264 
2265 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2266 
2267 		skb_dirtytx = (skb_dirtytx + 1) &
2268 			      TX_RING_MOD_MASK(tx_ring_size);
2269 
2270 		howmany++;
2271 		spin_lock(&tx_queue->txlock);
2272 		tx_queue->num_txbdfree += nr_txbds;
2273 		spin_unlock(&tx_queue->txlock);
2274 	}
2275 
2276 	/* If we freed a buffer, we can restart transmission, if necessary */
2277 	if (tx_queue->num_txbdfree &&
2278 	    netif_tx_queue_stopped(txq) &&
2279 	    !(test_bit(GFAR_DOWN, &priv->state)))
2280 		netif_wake_subqueue(priv->ndev, tqi);
2281 
2282 	/* Update dirty indicators */
2283 	tx_queue->skb_dirtytx = skb_dirtytx;
2284 	tx_queue->dirty_tx = bdp;
2285 
2286 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2287 }
2288 
2289 static void count_errors(u32 lstatus, struct net_device *ndev)
2290 {
2291 	struct gfar_private *priv = netdev_priv(ndev);
2292 	struct net_device_stats *stats = &ndev->stats;
2293 	struct gfar_extra_stats *estats = &priv->extra_stats;
2294 
2295 	/* If the packet was truncated, none of the other errors matter */
2296 	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2297 		stats->rx_length_errors++;
2298 
2299 		atomic64_inc(&estats->rx_trunc);
2300 
2301 		return;
2302 	}
2303 	/* Count the errors, if there were any */
2304 	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2305 		stats->rx_length_errors++;
2306 
2307 		if (lstatus & BD_LFLAG(RXBD_LARGE))
2308 			atomic64_inc(&estats->rx_large);
2309 		else
2310 			atomic64_inc(&estats->rx_short);
2311 	}
2312 	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2313 		stats->rx_frame_errors++;
2314 		atomic64_inc(&estats->rx_nonoctet);
2315 	}
2316 	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2317 		atomic64_inc(&estats->rx_crcerr);
2318 		stats->rx_crc_errors++;
2319 	}
2320 	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2321 		atomic64_inc(&estats->rx_overrun);
2322 		stats->rx_over_errors++;
2323 	}
2324 }
2325 
2326 static irqreturn_t gfar_receive(int irq, void *grp_id)
2327 {
2328 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2329 	unsigned long flags;
2330 	u32 imask, ievent;
2331 
2332 	ievent = gfar_read(&grp->regs->ievent);
2333 
2334 	if (unlikely(ievent & IEVENT_FGPI)) {
2335 		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2336 		return IRQ_HANDLED;
2337 	}
2338 
2339 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2340 		spin_lock_irqsave(&grp->grplock, flags);
2341 		imask = gfar_read(&grp->regs->imask);
2342 		imask &= IMASK_RX_DISABLED;
2343 		gfar_write(&grp->regs->imask, imask);
2344 		spin_unlock_irqrestore(&grp->grplock, flags);
2345 		__napi_schedule(&grp->napi_rx);
2346 	} else {
2347 		/* Clear IEVENT, so interrupts aren't called again
2348 		 * because of the packets that have already arrived.
2349 		 */
2350 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2351 	}
2352 
2353 	return IRQ_HANDLED;
2354 }
2355 
2356 /* Interrupt Handler for Transmit complete */
2357 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2358 {
2359 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2360 	unsigned long flags;
2361 	u32 imask;
2362 
2363 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2364 		spin_lock_irqsave(&grp->grplock, flags);
2365 		imask = gfar_read(&grp->regs->imask);
2366 		imask &= IMASK_TX_DISABLED;
2367 		gfar_write(&grp->regs->imask, imask);
2368 		spin_unlock_irqrestore(&grp->grplock, flags);
2369 		__napi_schedule(&grp->napi_tx);
2370 	} else {
2371 		/* Clear IEVENT, so interrupts aren't called again
2372 		 * because of frames that have already been transmitted.
2373 		 */
2374 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2375 	}
2376 
2377 	return IRQ_HANDLED;
2378 }
2379 
2380 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2381 			     struct sk_buff *skb, bool first)
2382 {
2383 	int size = lstatus & BD_LENGTH_MASK;
2384 	struct page *page = rxb->page;
2385 
2386 	if (likely(first)) {
2387 		skb_put(skb, size);
2388 	} else {
2389 		/* the last fragment's length contains the full frame length */
2390 		if (lstatus & BD_LFLAG(RXBD_LAST))
2391 			size -= skb->len;
2392 
2393 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2394 				rxb->page_offset + RXBUF_ALIGNMENT,
2395 				size, GFAR_RXB_TRUESIZE);
2396 	}
2397 
2398 	/* try reuse page */
2399 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2400 		return false;
2401 
2402 	/* change offset to the other half */
2403 	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2404 
2405 	page_ref_inc(page);
2406 
2407 	return true;
2408 }
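
/* Page-reuse scheme implemented above (sketch): each Rx buffer owns a
 * page split into two GFAR_RXB_TRUESIZE halves.  While the stack holds
 * the half just attached to the skb, the buffer is re-armed with the
 * other half (page_offset ^= GFAR_RXB_TRUESIZE plus a refcount bump).
 * Only if someone else still holds a page reference, or the page is
 * pfmemalloc, does the caller unmap it and allocate a fresh one.
 */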
2409 
2410 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2411 			       struct gfar_rx_buff *old_rxb)
2412 {
2413 	struct gfar_rx_buff *new_rxb;
2414 	u16 nta = rxq->next_to_alloc;
2415 
2416 	new_rxb = &rxq->rx_buff[nta];
2417 
2418 	/* find next buf that can reuse a page */
2419 	nta++;
2420 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2421 
2422 	/* copy page reference */
2423 	*new_rxb = *old_rxb;
2424 
2425 	/* sync for use by the device */
2426 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2427 					 old_rxb->page_offset,
2428 					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2429 }
2430 
2431 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2432 					    u32 lstatus, struct sk_buff *skb)
2433 {
2434 	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2435 	struct page *page = rxb->page;
2436 	bool first = false;
2437 
2438 	if (likely(!skb)) {
2439 		void *buff_addr = page_address(page) + rxb->page_offset;
2440 
2441 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2442 		if (unlikely(!skb)) {
2443 			gfar_rx_alloc_err(rx_queue);
2444 			return NULL;
2445 		}
2446 		skb_reserve(skb, RXBUF_ALIGNMENT);
2447 		first = true;
2448 	}
2449 
2450 	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2451 				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2452 
2453 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2454 		/* reuse the free half of the page */
2455 		gfar_reuse_rx_page(rx_queue, rxb);
2456 	} else {
2457 		/* page cannot be reused, unmap it */
2458 		dma_unmap_page(rx_queue->dev, rxb->dma,
2459 			       PAGE_SIZE, DMA_FROM_DEVICE);
2460 	}
2461 
2462 	/* clear rxb content */
2463 	rxb->page = NULL;
2464 
2465 	return skb;
2466 }
2467 
2468 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2469 {
2470 	/* If valid headers were found, and valid sums
2471 	 * were verified, then we tell the kernel that no
2472 	 * checksumming is necessary.  Otherwise, the stack must verify it.
2473 	 */
2474 	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2475 	    (RXFCB_CIP | RXFCB_CTU))
2476 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2477 	else
2478 		skb_checksum_none_assert(skb);
2479 }
2480 
2481 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2482 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2483 {
2484 	struct gfar_private *priv = netdev_priv(ndev);
2485 	struct rxfcb *fcb = NULL;
2486 
2487 	/* The FCB, if present, is at the beginning of the buffer */
2488 	fcb = (struct rxfcb *)skb->data;
2489 
2490 	/* Remove the FCB from the skb
2491 	 * Remove the padded bytes, if there are any
2492 	 */
2493 	if (priv->uses_rxfcb)
2494 		skb_pull(skb, GMAC_FCB_LEN);
2495 
2496 	/* Get receive timestamp from the skb */
2497 	if (priv->hwts_rx_en) {
2498 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2499 		u64 *ns = (u64 *) skb->data;
2500 
2501 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2502 		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2503 	}
2504 
2505 	if (priv->padding)
2506 		skb_pull(skb, priv->padding);
2507 
2508 	/* Trim off the FCS */
2509 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
2510 
2511 	if (ndev->features & NETIF_F_RXCSUM)
2512 		gfar_rx_checksum(skb, fcb);
2513 
2514 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2515 	 * Even if VLAN rx acceleration is disabled, on some chips
2516 	 * RXFCB_VLN is set pseudo-randomly.
2517 	 */
2518 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2519 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
2520 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2521 				       be16_to_cpu(fcb->vlctl));
2522 }
2523 
2524 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2525  * until the budget/quota has been reached. Returns the number
2526  * of frames handled
2527  */
2528 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2529 			      int rx_work_limit)
2530 {
2531 	struct net_device *ndev = rx_queue->ndev;
2532 	struct gfar_private *priv = netdev_priv(ndev);
2533 	struct rxbd8 *bdp;
2534 	int i, howmany = 0;
2535 	struct sk_buff *skb = rx_queue->skb;
2536 	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2537 	unsigned int total_bytes = 0, total_pkts = 0;
2538 
2539 	/* Get the first full descriptor */
2540 	i = rx_queue->next_to_clean;
2541 
2542 	while (rx_work_limit--) {
2543 		u32 lstatus;
2544 
2545 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2546 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2547 			cleaned_cnt = 0;
2548 		}
2549 
2550 		bdp = &rx_queue->rx_bd_base[i];
2551 		lstatus = be32_to_cpu(bdp->lstatus);
2552 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
2553 			break;
2554 
2555 		/* order rx buffer descriptor reads */
2556 		rmb();
2557 
2558 		/* fetch next to clean buffer from the ring */
2559 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2560 		if (unlikely(!skb))
2561 			break;
2562 
2563 		cleaned_cnt++;
2564 		howmany++;
2565 
2566 		if (unlikely(++i == rx_queue->rx_ring_size))
2567 			i = 0;
2568 
2569 		rx_queue->next_to_clean = i;
2570 
2571 		/* fetch next buffer if not the last in frame */
2572 		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2573 			continue;
2574 
2575 		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2576 			count_errors(lstatus, ndev);
2577 
2578 			/* discard faulty buffer */
2579 			dev_kfree_skb(skb);
2580 			skb = NULL;
2581 			rx_queue->stats.rx_dropped++;
2582 			continue;
2583 		}
2584 
2585 		gfar_process_frame(ndev, skb);
2586 
2587 		/* Increment the number of packets */
2588 		total_pkts++;
2589 		total_bytes += skb->len;
2590 
2591 		skb_record_rx_queue(skb, rx_queue->qindex);
2592 
2593 		skb->protocol = eth_type_trans(skb, ndev);
2594 
2595 		/* Send the packet up the stack */
2596 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2597 
2598 		skb = NULL;
2599 	}
2600 
2601 	/* Store incomplete frames for completion */
2602 	rx_queue->skb = skb;
2603 
2604 	rx_queue->stats.rx_packets += total_pkts;
2605 	rx_queue->stats.rx_bytes += total_bytes;
2606 
2607 	if (cleaned_cnt)
2608 		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2609 
2610 	/* Update Last Free RxBD pointer for LFC */
2611 	if (unlikely(priv->tx_actual_en)) {
2612 		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2613 
2614 		gfar_write(rx_queue->rfbptr, bdp_dma);
2615 	}
2616 
2617 	return howmany;
2618 }
2619 
2620 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2621 {
2622 	struct gfar_priv_grp *gfargrp =
2623 		container_of(napi, struct gfar_priv_grp, napi_rx);
2624 	struct gfar __iomem *regs = gfargrp->regs;
2625 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2626 	int work_done = 0;
2627 
2628 	/* Clear IEVENT, so interrupts aren't called again
2629 	 * because of the packets that have already arrived
2630 	 */
2631 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2632 
2633 	work_done = gfar_clean_rx_ring(rx_queue, budget);
2634 
2635 	if (work_done < budget) {
2636 		u32 imask;
2637 		napi_complete_done(napi, work_done);
2638 		/* Clear the halt bit in RSTAT */
2639 		gfar_write(&regs->rstat, gfargrp->rstat);
2640 
2641 		spin_lock_irq(&gfargrp->grplock);
2642 		imask = gfar_read(&regs->imask);
2643 		imask |= IMASK_RX_DEFAULT;
2644 		gfar_write(&regs->imask, imask);
2645 		spin_unlock_irq(&gfargrp->grplock);
2646 	}
2647 
2648 	return work_done;
2649 }
2650 
2651 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2652 {
2653 	struct gfar_priv_grp *gfargrp =
2654 		container_of(napi, struct gfar_priv_grp, napi_tx);
2655 	struct gfar __iomem *regs = gfargrp->regs;
2656 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2657 	u32 imask;
2658 
2659 	/* Clear IEVENT, so interrupts aren't called again
2660 	 * because of frames that have already been transmitted
2661 	 */
2662 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2663 
2664 	/* run Tx cleanup to completion */
2665 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2666 		gfar_clean_tx_ring(tx_queue);
2667 
2668 	napi_complete(napi);
2669 
2670 	spin_lock_irq(&gfargrp->grplock);
2671 	imask = gfar_read(&regs->imask);
2672 	imask |= IMASK_TX_DEFAULT;
2673 	gfar_write(&regs->imask, imask);
2674 	spin_unlock_irq(&gfargrp->grplock);
2675 
2676 	return 0;
2677 }
2678 
2679 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2680 {
2681 	struct gfar_priv_grp *gfargrp =
2682 		container_of(napi, struct gfar_priv_grp, napi_rx);
2683 	struct gfar_private *priv = gfargrp->priv;
2684 	struct gfar __iomem *regs = gfargrp->regs;
2685 	struct gfar_priv_rx_q *rx_queue = NULL;
2686 	int work_done = 0, work_done_per_q = 0;
2687 	int i, budget_per_q = 0;
2688 	unsigned long rstat_rxf;
2689 	int num_act_queues;
2690 
2691 	/* Clear IEVENT, so interrupts aren't called again
2692 	 * because of the packets that have already arrived
2693 	 */
2694 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2695 
2696 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2697 
2698 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2699 	if (num_act_queues)
2700 		budget_per_q = budget / num_act_queues;
2701 
2702 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2703 		/* skip queue if not active */
2704 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2705 			continue;
2706 
2707 		rx_queue = priv->rx_queue[i];
2708 		work_done_per_q =
2709 			gfar_clean_rx_ring(rx_queue, budget_per_q);
2710 		work_done += work_done_per_q;
2711 
2712 		/* finished processing this queue */
2713 		if (work_done_per_q < budget_per_q) {
2714 			/* clear active queue hw indication */
2715 			gfar_write(&regs->rstat,
2716 				   RSTAT_CLEAR_RXF0 >> i);
2717 			num_act_queues--;
2718 
2719 			if (!num_act_queues)
2720 				break;
2721 		}
2722 	}
2723 
2724 	if (!num_act_queues) {
2725 		u32 imask;
2726 		napi_complete_done(napi, work_done);
2727 
2728 		/* Clear the halt bit in RSTAT */
2729 		gfar_write(&regs->rstat, gfargrp->rstat);
2730 
2731 		spin_lock_irq(&gfargrp->grplock);
2732 		imask = gfar_read(&regs->imask);
2733 		imask |= IMASK_RX_DEFAULT;
2734 		gfar_write(&regs->imask, imask);
2735 		spin_unlock_irq(&gfargrp->grplock);
2736 	}
2737 
2738 	return work_done;
2739 }
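
/* Budget split example for gfar_poll_rx() (illustrative only): with a
 * NAPI budget of 64 and three queues flagged in RSTAT, each ring is
 * polled with budget_per_q = 64 / 3 = 21; a queue returning fewer than
 * 21 frames gets its RXFn bit cleared and num_act_queues is reduced.
 */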
2740 
2741 static int gfar_poll_tx(struct napi_struct *napi, int budget)
2742 {
2743 	struct gfar_priv_grp *gfargrp =
2744 		container_of(napi, struct gfar_priv_grp, napi_tx);
2745 	struct gfar_private *priv = gfargrp->priv;
2746 	struct gfar __iomem *regs = gfargrp->regs;
2747 	struct gfar_priv_tx_q *tx_queue = NULL;
2748 	int has_tx_work = 0;
2749 	int i;
2750 
2751 	/* Clear IEVENT, so interrupts aren't called again
2752 	 * because of frames that have already been transmitted
2753 	 */
2754 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2755 
2756 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2757 		tx_queue = priv->tx_queue[i];
2758 		/* run Tx cleanup to completion */
2759 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2760 			gfar_clean_tx_ring(tx_queue);
2761 			has_tx_work = 1;
2762 		}
2763 	}
2764 
2765 	if (!has_tx_work) {
2766 		u32 imask;
2767 		napi_complete(napi);
2768 
2769 		spin_lock_irq(&gfargrp->grplock);
2770 		imask = gfar_read(&regs->imask);
2771 		imask |= IMASK_TX_DEFAULT;
2772 		gfar_write(&regs->imask, imask);
2773 		spin_unlock_irq(&gfargrp->grplock);
2774 	}
2775 
2776 	return 0;
2777 }
2778 
2779 /* GFAR error interrupt handler */
2780 static irqreturn_t gfar_error(int irq, void *grp_id)
2781 {
2782 	struct gfar_priv_grp *gfargrp = grp_id;
2783 	struct gfar __iomem *regs = gfargrp->regs;
2784 	struct gfar_private *priv = gfargrp->priv;
2785 	struct net_device *dev = priv->ndev;
2786 
2787 	/* Save ievent for future reference */
2788 	u32 events = gfar_read(&regs->ievent);
2789 
2790 	/* Clear IEVENT */
2791 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2792 
2793 	/* Magic Packet is not an error. */
2794 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2795 	    (events & IEVENT_MAG))
2796 		events &= ~IEVENT_MAG;
2797 
2798 	/* Report the error details if rx/tx error messaging is enabled */
2799 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2800 		netdev_dbg(dev,
2801 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2802 			   events, gfar_read(&regs->imask));
2803 
2804 	/* Update the error counters */
2805 	if (events & IEVENT_TXE) {
2806 		dev->stats.tx_errors++;
2807 
2808 		if (events & IEVENT_LC)
2809 			dev->stats.tx_window_errors++;
2810 		if (events & IEVENT_CRL)
2811 			dev->stats.tx_aborted_errors++;
2812 		if (events & IEVENT_XFUN) {
2813 			netif_dbg(priv, tx_err, dev,
2814 				  "TX FIFO underrun, packet dropped\n");
2815 			dev->stats.tx_dropped++;
2816 			atomic64_inc(&priv->extra_stats.tx_underrun);
2817 
2818 			schedule_work(&priv->reset_task);
2819 		}
2820 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2821 	}
2822 	if (events & IEVENT_BSY) {
2823 		dev->stats.rx_over_errors++;
2824 		atomic64_inc(&priv->extra_stats.rx_bsy);
2825 
2826 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2827 			  gfar_read(&regs->rstat));
2828 	}
2829 	if (events & IEVENT_BABR) {
2830 		dev->stats.rx_errors++;
2831 		atomic64_inc(&priv->extra_stats.rx_babr);
2832 
2833 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2834 	}
2835 	if (events & IEVENT_EBERR) {
2836 		atomic64_inc(&priv->extra_stats.eberr);
2837 		netif_dbg(priv, rx_err, dev, "bus error\n");
2838 	}
2839 	if (events & IEVENT_RXC)
2840 		netif_dbg(priv, rx_status, dev, "control frame\n");
2841 
2842 	if (events & IEVENT_BABT) {
2843 		atomic64_inc(&priv->extra_stats.tx_babt);
2844 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2845 	}
2846 	return IRQ_HANDLED;
2847 }
2848 
2849 /* The interrupt handler for devices with one interrupt */
2850 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2851 {
2852 	struct gfar_priv_grp *gfargrp = grp_id;
2853 
2854 	/* Save ievent for future reference */
2855 	u32 events = gfar_read(&gfargrp->regs->ievent);
2856 
2857 	/* Check for reception */
2858 	if (events & IEVENT_RX_MASK)
2859 		gfar_receive(irq, grp_id);
2860 
2861 	/* Check for transmit completion */
2862 	if (events & IEVENT_TX_MASK)
2863 		gfar_transmit(irq, grp_id);
2864 
2865 	/* Check for errors */
2866 	if (events & IEVENT_ERR_MASK)
2867 		gfar_error(irq, grp_id);
2868 
2869 	return IRQ_HANDLED;
2870 }
2871 
2872 #ifdef CONFIG_NET_POLL_CONTROLLER
2873 /* Polling 'interrupt' - used by things like netconsole to send skbs
2874  * without having to re-enable interrupts. It's not called while
2875  * the interrupt routine is executing.
2876  */
2877 static void gfar_netpoll(struct net_device *dev)
2878 {
2879 	struct gfar_private *priv = netdev_priv(dev);
2880 	int i;
2881 
2882 	/* If the device has multiple interrupts, run tx/rx */
2883 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2884 		for (i = 0; i < priv->num_grps; i++) {
2885 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
2886 
2887 			disable_irq(gfar_irq(grp, TX)->irq);
2888 			disable_irq(gfar_irq(grp, RX)->irq);
2889 			disable_irq(gfar_irq(grp, ER)->irq);
2890 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2891 			enable_irq(gfar_irq(grp, ER)->irq);
2892 			enable_irq(gfar_irq(grp, RX)->irq);
2893 			enable_irq(gfar_irq(grp, TX)->irq);
2894 		}
2895 	} else {
2896 		for (i = 0; i < priv->num_grps; i++) {
2897 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
2898 
2899 			disable_irq(gfar_irq(grp, TX)->irq);
2900 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2901 			enable_irq(gfar_irq(grp, TX)->irq);
2902 		}
2903 	}
2904 }
2905 #endif
2906 
2907 static void free_grp_irqs(struct gfar_priv_grp *grp)
2908 {
2909 	free_irq(gfar_irq(grp, TX)->irq, grp);
2910 	free_irq(gfar_irq(grp, RX)->irq, grp);
2911 	free_irq(gfar_irq(grp, ER)->irq, grp);
2912 }
2913 
2914 static int register_grp_irqs(struct gfar_priv_grp *grp)
2915 {
2916 	struct gfar_private *priv = grp->priv;
2917 	struct net_device *dev = priv->ndev;
2918 	int err;
2919 
2920 	/* If the device has multiple interrupts, register for
2921 	 * them.  Otherwise, only register for the one
2922 	 */
2923 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2924 		/* Install our interrupt handlers for Error,
2925 		 * Transmit, and Receive
2926 		 */
2927 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2928 				  gfar_irq(grp, ER)->name, grp);
2929 		if (err < 0) {
2930 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2931 				  gfar_irq(grp, ER)->irq);
2932 
2933 			goto err_irq_fail;
2934 		}
2935 		enable_irq_wake(gfar_irq(grp, ER)->irq);
2936 
2937 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2938 				  gfar_irq(grp, TX)->name, grp);
2939 		if (err < 0) {
2940 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2941 				  gfar_irq(grp, TX)->irq);
2942 			goto tx_irq_fail;
2943 		}
2944 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2945 				  gfar_irq(grp, RX)->name, grp);
2946 		if (err < 0) {
2947 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2948 				  gfar_irq(grp, RX)->irq);
2949 			goto rx_irq_fail;
2950 		}
2951 		enable_irq_wake(gfar_irq(grp, RX)->irq);
2952 
2953 	} else {
2954 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2955 				  gfar_irq(grp, TX)->name, grp);
2956 		if (err < 0) {
2957 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2958 				  gfar_irq(grp, TX)->irq);
2959 			goto err_irq_fail;
2960 		}
2961 		enable_irq_wake(gfar_irq(grp, TX)->irq);
2962 	}
2963 
2964 	return 0;
2965 
2966 rx_irq_fail:
2967 	free_irq(gfar_irq(grp, TX)->irq, grp);
2968 tx_irq_fail:
2969 	free_irq(gfar_irq(grp, ER)->irq, grp);
2970 err_irq_fail:
2971 	return err;
2972 
2973 }
2974 
2975 static void gfar_free_irq(struct gfar_private *priv)
2976 {
2977 	int i;
2978 
2979 	/* Free the IRQs */
2980 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2981 		for (i = 0; i < priv->num_grps; i++)
2982 			free_grp_irqs(&priv->gfargrp[i]);
2983 	} else {
2984 		for (i = 0; i < priv->num_grps; i++)
2985 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2986 				 &priv->gfargrp[i]);
2987 	}
2988 }
2989 
2990 static int gfar_request_irq(struct gfar_private *priv)
2991 {
2992 	int err, i, j;
2993 
2994 	for (i = 0; i < priv->num_grps; i++) {
2995 		err = register_grp_irqs(&priv->gfargrp[i]);
2996 		if (err) {
2997 			for (j = 0; j < i; j++)
2998 				free_grp_irqs(&priv->gfargrp[j]);
2999 			return err;
3000 		}
3001 	}
3002 
3003 	return 0;
3004 }
3005 
3006 /* Called when something needs to use the ethernet device
3007  * Returns 0 for success.
3008  */
3009 static int gfar_enet_open(struct net_device *dev)
3010 {
3011 	struct gfar_private *priv = netdev_priv(dev);
3012 	int err;
3013 
3014 	err = init_phy(dev);
3015 	if (err)
3016 		return err;
3017 
3018 	err = gfar_request_irq(priv);
3019 	if (err)
3020 		return err;
3021 
3022 	err = startup_gfar(dev);
3023 	if (err)
3024 		return err;
3025 
3026 	return err;
3027 }
3028 
3029 /* Stops the kernel queue, and halts the controller */
3030 static int gfar_close(struct net_device *dev)
3031 {
3032 	struct gfar_private *priv = netdev_priv(dev);
3033 
3034 	cancel_work_sync(&priv->reset_task);
3035 	stop_gfar(dev);
3036 
3037 	/* Disconnect from the PHY */
3038 	phy_disconnect(dev->phydev);
3039 
3040 	gfar_free_irq(priv);
3041 
3042 	return 0;
3043 }
3044 
3045 /* Clears each of the exact match registers to zero, so they
3046  * don't interfere with normal reception
3047  */
3048 static void gfar_clear_exact_match(struct net_device *dev)
3049 {
3050 	int idx;
3051 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3052 
3053 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3054 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3055 }
3056 
3057 /* Update the hash table based on the current list of multicast
3058  * addresses we subscribe to.  Also, change the promiscuity of
3059  * the device based on the flags (this function is called
3060  * whenever dev->flags is changed).
3061  */
3062 static void gfar_set_multi(struct net_device *dev)
3063 {
3064 	struct netdev_hw_addr *ha;
3065 	struct gfar_private *priv = netdev_priv(dev);
3066 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3067 	u32 tempval;
3068 
3069 	if (dev->flags & IFF_PROMISC) {
3070 		/* Set RCTRL to PROM */
3071 		tempval = gfar_read(&regs->rctrl);
3072 		tempval |= RCTRL_PROM;
3073 		gfar_write(&regs->rctrl, tempval);
3074 	} else {
3075 		/* Set RCTRL to not PROM */
3076 		tempval = gfar_read(&regs->rctrl);
3077 		tempval &= ~(RCTRL_PROM);
3078 		gfar_write(&regs->rctrl, tempval);
3079 	}
3080 
3081 	if (dev->flags & IFF_ALLMULTI) {
3082 		/* Set the hash to rx all multicast frames */
3083 		gfar_write(&regs->igaddr0, 0xffffffff);
3084 		gfar_write(&regs->igaddr1, 0xffffffff);
3085 		gfar_write(&regs->igaddr2, 0xffffffff);
3086 		gfar_write(&regs->igaddr3, 0xffffffff);
3087 		gfar_write(&regs->igaddr4, 0xffffffff);
3088 		gfar_write(&regs->igaddr5, 0xffffffff);
3089 		gfar_write(&regs->igaddr6, 0xffffffff);
3090 		gfar_write(&regs->igaddr7, 0xffffffff);
3091 		gfar_write(&regs->gaddr0, 0xffffffff);
3092 		gfar_write(&regs->gaddr1, 0xffffffff);
3093 		gfar_write(&regs->gaddr2, 0xffffffff);
3094 		gfar_write(&regs->gaddr3, 0xffffffff);
3095 		gfar_write(&regs->gaddr4, 0xffffffff);
3096 		gfar_write(&regs->gaddr5, 0xffffffff);
3097 		gfar_write(&regs->gaddr6, 0xffffffff);
3098 		gfar_write(&regs->gaddr7, 0xffffffff);
3099 	} else {
3100 		int em_num;
3101 		int idx;
3102 
3103 		/* zero out the hash */
3104 		gfar_write(&regs->igaddr0, 0x0);
3105 		gfar_write(&regs->igaddr1, 0x0);
3106 		gfar_write(&regs->igaddr2, 0x0);
3107 		gfar_write(&regs->igaddr3, 0x0);
3108 		gfar_write(&regs->igaddr4, 0x0);
3109 		gfar_write(&regs->igaddr5, 0x0);
3110 		gfar_write(&regs->igaddr6, 0x0);
3111 		gfar_write(&regs->igaddr7, 0x0);
3112 		gfar_write(&regs->gaddr0, 0x0);
3113 		gfar_write(&regs->gaddr1, 0x0);
3114 		gfar_write(&regs->gaddr2, 0x0);
3115 		gfar_write(&regs->gaddr3, 0x0);
3116 		gfar_write(&regs->gaddr4, 0x0);
3117 		gfar_write(&regs->gaddr5, 0x0);
3118 		gfar_write(&regs->gaddr6, 0x0);
3119 		gfar_write(&regs->gaddr7, 0x0);
3120 
3121 		/* If we have extended hash tables, we need to
3122 		 * clear the exact match registers to prepare for
3123 		 * setting them
3124 		 */
3125 		if (priv->extended_hash) {
3126 			em_num = GFAR_EM_NUM + 1;
3127 			gfar_clear_exact_match(dev);
3128 			idx = 1;
3129 		} else {
3130 			idx = 0;
3131 			em_num = 0;
3132 		}
3133 
3134 		if (netdev_mc_empty(dev))
3135 			return;
3136 
3137 		/* Parse the list, and set the appropriate bits */
3138 		netdev_for_each_mc_addr(ha, dev) {
3139 			if (idx < em_num) {
3140 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3141 				idx++;
3142 			} else
3143 				gfar_set_hash_for_addr(dev, ha->addr);
3144 		}
3145 	}
3146 }
3147 
3148 void gfar_mac_reset(struct gfar_private *priv)
3149 {
3150 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3151 	u32 tempval;
3152 
3153 	/* Reset MAC layer */
3154 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3155 
3156 	/* We need to delay at least 3 TX clocks */
3157 	udelay(3);
3158 
3159 	/* the soft reset bit is not self-resetting, so we need to
3160 	 * clear it before resuming normal operation
3161 	 */
3162 	gfar_write(&regs->maccfg1, 0);
3163 
3164 	udelay(3);
3165 
3166 	gfar_rx_offload_en(priv);
3167 
3168 	/* Initialize the max receive frame/buffer lengths */
3169 	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3170 	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3171 
3172 	/* Initialize the Minimum Frame Length Register */
3173 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3174 
3175 	/* Initialize MACCFG2. */
3176 	tempval = MACCFG2_INIT_SETTINGS;
3177 
3178 	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3179 	 * are marked as truncated.  Avoid this by setting MACCFG2[Huge Frame]=1,
3180 	 * checking RxBD[LG], and discarding frames larger than MAXFRM.
3181 	 */
3182 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
3183 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3184 
3185 	gfar_write(&regs->maccfg2, tempval);
3186 
3187 	/* Clear mac addr hash registers */
3188 	gfar_write(&regs->igaddr0, 0);
3189 	gfar_write(&regs->igaddr1, 0);
3190 	gfar_write(&regs->igaddr2, 0);
3191 	gfar_write(&regs->igaddr3, 0);
3192 	gfar_write(&regs->igaddr4, 0);
3193 	gfar_write(&regs->igaddr5, 0);
3194 	gfar_write(&regs->igaddr6, 0);
3195 	gfar_write(&regs->igaddr7, 0);
3196 
3197 	gfar_write(&regs->gaddr0, 0);
3198 	gfar_write(&regs->gaddr1, 0);
3199 	gfar_write(&regs->gaddr2, 0);
3200 	gfar_write(&regs->gaddr3, 0);
3201 	gfar_write(&regs->gaddr4, 0);
3202 	gfar_write(&regs->gaddr5, 0);
3203 	gfar_write(&regs->gaddr6, 0);
3204 	gfar_write(&regs->gaddr7, 0);
3205 
3206 	if (priv->extended_hash)
3207 		gfar_clear_exact_match(priv->ndev);
3208 
3209 	gfar_mac_rx_config(priv);
3210 
3211 	gfar_mac_tx_config(priv);
3212 
3213 	gfar_set_mac_address(priv->ndev);
3214 
3215 	gfar_set_multi(priv->ndev);
3216 
3217 	/* clear ievent and imask before configuring coalescing */
3218 	gfar_ints_disable(priv);
3219 
3220 	/* Configure the coalescing support */
3221 	gfar_configure_coalescing_all(priv);
3222 }
3223 
3224 static void gfar_hw_init(struct gfar_private *priv)
3225 {
3226 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3227 	u32 attrs;
3228 
3229 	/* Stop the DMA engine now, in case it was running before
3230 	 * (The firmware could have used it, and left it running).
3231 	 */
3232 	gfar_halt(priv);
3233 
3234 	gfar_mac_reset(priv);
3235 
3236 	/* Zero out the rmon mib registers if it has them */
3237 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3238 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3239 
3240 		/* Mask off the CAM interrupts */
3241 		gfar_write(&regs->rmon.cam1, 0xffffffff);
3242 		gfar_write(&regs->rmon.cam2, 0xffffffff);
3243 	}
3244 
3245 	/* Initialize ECNTRL */
3246 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3247 
3248 	/* Set the extraction length and index */
3249 	attrs = ATTRELI_EL(priv->rx_stash_size) |
3250 		ATTRELI_EI(priv->rx_stash_index);
3251 
3252 	gfar_write(&regs->attreli, attrs);
3253 
3254 	/* Start with defaults, and add stashing
3255 	 * depending on driver parameters
3256 	 */
3257 	attrs = ATTR_INIT_SETTINGS;
3258 
3259 	if (priv->bd_stash_en)
3260 		attrs |= ATTR_BDSTASH;
3261 
3262 	if (priv->rx_stash_size != 0)
3263 		attrs |= ATTR_BUFSTASH;
3264 
3265 	gfar_write(&regs->attr, attrs);
3266 
3267 	/* FIFO configs */
3268 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3269 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3270 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3271 
3272 	/* Program the interrupt steering regs, only for MG devices */
3273 	if (priv->num_grps > 1)
3274 		gfar_write_isrg(priv);
3275 }
3276 
3277 static const struct net_device_ops gfar_netdev_ops = {
3278 	.ndo_open = gfar_enet_open,
3279 	.ndo_start_xmit = gfar_start_xmit,
3280 	.ndo_stop = gfar_close,
3281 	.ndo_change_mtu = gfar_change_mtu,
3282 	.ndo_set_features = gfar_set_features,
3283 	.ndo_set_rx_mode = gfar_set_multi,
3284 	.ndo_tx_timeout = gfar_timeout,
3285 	.ndo_do_ioctl = gfar_ioctl,
3286 	.ndo_get_stats = gfar_get_stats,
3287 	.ndo_change_carrier = fixed_phy_change_carrier,
3288 	.ndo_set_mac_address = gfar_set_mac_addr,
3289 	.ndo_validate_addr = eth_validate_addr,
3290 #ifdef CONFIG_NET_POLL_CONTROLLER
3291 	.ndo_poll_controller = gfar_netpoll,
3292 #endif
3293 };
3294 
3295 /* Set up the ethernet device structure, private data,
3296  * and anything else we need before we start
3297  */
3298 static int gfar_probe(struct platform_device *ofdev)
3299 {
3300 	struct device_node *np = ofdev->dev.of_node;
3301 	struct net_device *dev = NULL;
3302 	struct gfar_private *priv = NULL;
3303 	int err = 0, i;
3304 
3305 	err = gfar_of_init(ofdev, &dev);
3306 
3307 	if (err)
3308 		return err;
3309 
3310 	priv = netdev_priv(dev);
3311 	priv->ndev = dev;
3312 	priv->ofdev = ofdev;
3313 	priv->dev = &ofdev->dev;
3314 	SET_NETDEV_DEV(dev, &ofdev->dev);
3315 
3316 	INIT_WORK(&priv->reset_task, gfar_reset_task);
3317 
3318 	platform_set_drvdata(ofdev, priv);
3319 
3320 	gfar_detect_errata(priv);
3321 
3322 	/* Set the dev->base_addr to the gfar reg region */
3323 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3324 
3325 	/* Fill in the dev structure */
3326 	dev->watchdog_timeo = TX_TIMEOUT;
3327 	/* MTU range: 50 - 9586 */
3328 	dev->mtu = 1500;
3329 	dev->min_mtu = 50;
3330 	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3331 	dev->netdev_ops = &gfar_netdev_ops;
3332 	dev->ethtool_ops = &gfar_ethtool_ops;
3333 
3334 	/* Register NAPI (Rx and Tx) for each interrupt group */
3335 	for (i = 0; i < priv->num_grps; i++) {
3336 		if (priv->poll_mode == GFAR_SQ_POLLING) {
3337 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3338 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3339 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3340 				       gfar_poll_tx_sq, 2);
3341 		} else {
3342 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3343 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
3344 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3345 				       gfar_poll_tx, 2);
3346 		}
3347 	}
3348 
3349 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3350 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3351 				   NETIF_F_RXCSUM;
3352 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3353 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3354 	}
3355 
3356 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3357 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3358 				    NETIF_F_HW_VLAN_CTAG_RX;
3359 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3360 	}
3361 
3362 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3363 
3364 	gfar_init_addr_hash_table(priv);
3365 
3366 	/* Insert receive time stamps into padding alignment bytes,
3367 	 * plus 2 bytes of padding to ensure CPU alignment.
3368 	 */
3369 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3370 		priv->padding = 8 + DEFAULT_PADDING;
3371 
3372 	if (dev->features & NETIF_F_IP_CSUM ||
3373 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3374 		dev->needed_headroom = GMAC_FCB_LEN;
3375 
3376 	/* Initializing some of the rx/tx queue level parameters */
3377 	for (i = 0; i < priv->num_tx_queues; i++) {
3378 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3379 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3380 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3381 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
3382 	}
3383 
3384 	for (i = 0; i < priv->num_rx_queues; i++) {
3385 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3386 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3387 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3388 	}
3389 
3390 	/* Always enable rx filer if available */
3391 	priv->rx_filer_enable =
3392 	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3393 	/* Enable most messages by default */
3394 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3395 	/* use priority h/w tx queue scheduling for single queue devices */
3396 	if (priv->num_tx_queues == 1)
3397 		priv->prio_sched_en = 1;
3398 
3399 	set_bit(GFAR_DOWN, &priv->state);
3400 
3401 	gfar_hw_init(priv);
3402 
3403 	/* Carrier starts down, phylib will bring it up */
3404 	netif_carrier_off(dev);
3405 
3406 	err = register_netdev(dev);
3407 
3408 	if (err) {
3409 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
3410 		goto register_fail;
3411 	}
3412 
3413 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3414 		priv->wol_supported |= GFAR_WOL_MAGIC;
3415 
3416 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3417 	    priv->rx_filer_enable)
3418 		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3419 
3420 	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3421 
3422 	/* fill out IRQ number and name fields */
3423 	for (i = 0; i < priv->num_grps; i++) {
3424 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
3425 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3426 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3427 				dev->name, "_g", '0' + i, "_tx");
3428 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3429 				dev->name, "_g", '0' + i, "_rx");
3430 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3431 				dev->name, "_g", '0' + i, "_er");
3432 		} else
3433 			strcpy(gfar_irq(grp, TX)->name, dev->name);
3434 	}
3435 
3436 	/* Initialize the filer table */
3437 	gfar_init_filer_table(priv);
3438 
3439 	/* Print out the device info */
3440 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3441 
3442 	/* Print additional device info, which helps correlate benchmark
3443 	 * results with the kernel and driver configuration used.
3444 	 */
3445 	netdev_info(dev, "Running with NAPI enabled\n");
3446 	for (i = 0; i < priv->num_rx_queues; i++)
3447 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3448 			    i, priv->rx_queue[i]->rx_ring_size);
3449 	for (i = 0; i < priv->num_tx_queues; i++)
3450 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3451 			    i, priv->tx_queue[i]->tx_ring_size);
3452 
3453 	return 0;
3454 
3455 register_fail:
3456 	if (of_phy_is_fixed_link(np))
3457 		of_phy_deregister_fixed_link(np);
3458 	unmap_group_regs(priv);
3459 	gfar_free_rx_queues(priv);
3460 	gfar_free_tx_queues(priv);
3461 	of_node_put(priv->phy_node);
3462 	of_node_put(priv->tbi_node);
3463 	free_gfar_dev(priv);
3464 	return err;
3465 }
3466 
3467 static int gfar_remove(struct platform_device *ofdev)
3468 {
3469 	struct gfar_private *priv = platform_get_drvdata(ofdev);
3470 	struct device_node *np = ofdev->dev.of_node;
3471 
3472 	of_node_put(priv->phy_node);
3473 	of_node_put(priv->tbi_node);
3474 
3475 	unregister_netdev(priv->ndev);
3476 
3477 	if (of_phy_is_fixed_link(np))
3478 		of_phy_deregister_fixed_link(np);
3479 
3480 	unmap_group_regs(priv);
3481 	gfar_free_rx_queues(priv);
3482 	gfar_free_tx_queues(priv);
3483 	free_gfar_dev(priv);
3484 
3485 	return 0;
3486 }
3487 
3488 #ifdef CONFIG_PM
3489 
3490 static void __gfar_filer_disable(struct gfar_private *priv)
3491 {
3492 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3493 	u32 temp;
3494 
3495 	temp = gfar_read(&regs->rctrl);
3496 	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3497 	gfar_write(&regs->rctrl, temp);
3498 }
3499 
3500 static void __gfar_filer_enable(struct gfar_private *priv)
3501 {
3502 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3503 	u32 temp;
3504 
3505 	temp = gfar_read(&regs->rctrl);
3506 	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3507 	gfar_write(&regs->rctrl, temp);
3508 }
3509 
3510 /* Filer rules implementing wol capabilities */
3511 static void gfar_filer_config_wol(struct gfar_private *priv)
3512 {
3513 	unsigned int i;
3514 	u32 rqfcr;
3515 
3516 	__gfar_filer_disable(priv);
3517 
3518 	/* clear the filer table, reject any packet by default */
3519 	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3520 	for (i = 0; i <= MAX_FILER_IDX; i++)
3521 		gfar_write_filer(priv, i, rqfcr, 0);
3522 
3523 	i = 0;
3524 	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3525 		/* unicast packet, accept it */
3526 		struct net_device *ndev = priv->ndev;
3527 		/* get the default rx queue index */
3528 		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3529 		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3530 				    (ndev->dev_addr[1] << 8) |
3531 				     ndev->dev_addr[2];
3532 
3533 		rqfcr = (qindex << 10) | RQFCR_AND |
3534 			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3535 
3536 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3537 
3538 		dest_mac_addr = (ndev->dev_addr[3] << 16) |
3539 				(ndev->dev_addr[4] << 8) |
3540 				 ndev->dev_addr[5];
3541 		rqfcr = (qindex << 10) | RQFCR_GPI |
3542 			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3543 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3544 	}
3545 
3546 	__gfar_filer_enable(priv);
3547 }
3548 
3549 static void gfar_filer_restore_table(struct gfar_private *priv)
3550 {
3551 	u32 rqfcr, rqfpr;
3552 	unsigned int i;
3553 
3554 	__gfar_filer_disable(priv);
3555 
3556 	for (i = 0; i <= MAX_FILER_IDX; i++) {
3557 		rqfcr = priv->ftp_rqfcr[i];
3558 		rqfpr = priv->ftp_rqfpr[i];
3559 		gfar_write_filer(priv, i, rqfcr, rqfpr);
3560 	}
3561 
3562 	__gfar_filer_enable(priv);
3563 }
3564 
3565 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
3566 static void gfar_start_wol_filer(struct gfar_private *priv)
3567 {
3568 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3569 	u32 tempval;
3570 	int i = 0;
3571 
3572 	/* Enable Rx hw queues */
3573 	gfar_write(&regs->rqueue, priv->rqueue);
3574 
3575 	/* Initialize DMACTRL to have WWR and WOP */
3576 	tempval = gfar_read(&regs->dmactrl);
3577 	tempval |= DMACTRL_INIT_SETTINGS;
3578 	gfar_write(&regs->dmactrl, tempval);
3579 
3580 	/* Make sure we aren't stopped */
3581 	tempval = gfar_read(&regs->dmactrl);
3582 	tempval &= ~DMACTRL_GRS;
3583 	gfar_write(&regs->dmactrl, tempval);
3584 
3585 	for (i = 0; i < priv->num_grps; i++) {
3586 		regs = priv->gfargrp[i].regs;
3587 		/* Clear RHLT, so that the DMA starts polling now */
3588 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3589 		/* enable the Filer General Purpose Interrupt */
3590 		gfar_write(&regs->imask, IMASK_FGPI);
3591 	}
3592 
3593 	/* Enable Rx DMA */
3594 	tempval = gfar_read(&regs->maccfg1);
3595 	tempval |= MACCFG1_RX_EN;
3596 	gfar_write(&regs->maccfg1, tempval);
3597 }
3598 
3599 static int gfar_suspend(struct device *dev)
3600 {
3601 	struct gfar_private *priv = dev_get_drvdata(dev);
3602 	struct net_device *ndev = priv->ndev;
3603 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3604 	u32 tempval;
3605 	u16 wol = priv->wol_opts;
3606 
3607 	if (!netif_running(ndev))
3608 		return 0;
3609 
3610 	disable_napi(priv);
3611 	netif_tx_lock(ndev);
3612 	netif_device_detach(ndev);
3613 	netif_tx_unlock(ndev);
3614 
3615 	gfar_halt(priv);
3616 
3617 	if (wol & GFAR_WOL_MAGIC) {
3618 		/* Enable interrupt on Magic Packet */
3619 		gfar_write(&regs->imask, IMASK_MAG);
3620 
3621 		/* Enable Magic Packet mode */
3622 		tempval = gfar_read(&regs->maccfg2);
3623 		tempval |= MACCFG2_MPEN;
3624 		gfar_write(&regs->maccfg2, tempval);
3625 
3626 		/* re-enable the Rx block */
3627 		tempval = gfar_read(&regs->maccfg1);
3628 		tempval |= MACCFG1_RX_EN;
3629 		gfar_write(&regs->maccfg1, tempval);
3630 
3631 	} else if (wol & GFAR_WOL_FILER_UCAST) {
3632 		gfar_filer_config_wol(priv);
3633 		gfar_start_wol_filer(priv);
3634 
3635 	} else {
3636 		phy_stop(ndev->phydev);
3637 	}
3638 
3639 	return 0;
3640 }
3641 
3642 static int gfar_resume(struct device *dev)
3643 {
3644 	struct gfar_private *priv = dev_get_drvdata(dev);
3645 	struct net_device *ndev = priv->ndev;
3646 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3647 	u32 tempval;
3648 	u16 wol = priv->wol_opts;
3649 
3650 	if (!netif_running(ndev))
3651 		return 0;
3652 
3653 	if (wol & GFAR_WOL_MAGIC) {
3654 		/* Disable Magic Packet mode */
3655 		tempval = gfar_read(&regs->maccfg2);
3656 		tempval &= ~MACCFG2_MPEN;
3657 		gfar_write(&regs->maccfg2, tempval);
3658 
3659 	} else if (wol & GFAR_WOL_FILER_UCAST) {
3660 		/* need to stop rx only, tx is already down */
3661 		gfar_halt(priv);
3662 		gfar_filer_restore_table(priv);
3663 
3664 	} else {
3665 		phy_start(ndev->phydev);
3666 	}
3667 
3668 	gfar_start(priv);
3669 
3670 	netif_device_attach(ndev);
3671 	enable_napi(priv);
3672 
3673 	return 0;
3674 }
3675 
3676 static int gfar_restore(struct device *dev)
3677 {
3678 	struct gfar_private *priv = dev_get_drvdata(dev);
3679 	struct net_device *ndev = priv->ndev;
3680 
3681 	if (!netif_running(ndev)) {
3682 		netif_device_attach(ndev);
3683 
3684 		return 0;
3685 	}
3686 
3687 	gfar_init_bds(ndev);
3688 
3689 	gfar_mac_reset(priv);
3690 
3691 	gfar_init_tx_rx_base(priv);
3692 
3693 	gfar_start(priv);
3694 
3695 	priv->oldlink = 0;
3696 	priv->oldspeed = 0;
3697 	priv->oldduplex = -1;
3698 
3699 	if (ndev->phydev)
3700 		phy_start(ndev->phydev);
3701 
3702 	netif_device_attach(ndev);
3703 	enable_napi(priv);
3704 
3705 	return 0;
3706 }
3707 
3708 static const struct dev_pm_ops gfar_pm_ops = {
3709 	.suspend = gfar_suspend,
3710 	.resume = gfar_resume,
3711 	.freeze = gfar_suspend,
3712 	.thaw = gfar_resume,
3713 	.restore = gfar_restore,
3714 };
3715 
3716 #define GFAR_PM_OPS (&gfar_pm_ops)
3717 
3718 #else
3719 
3720 #define GFAR_PM_OPS NULL
3721 
3722 #endif
3723 
3724 static const struct of_device_id gfar_match[] =
3725 {
3726 	{
3727 		.type = "network",
3728 		.compatible = "gianfar",
3729 	},
3730 	{
3731 		.compatible = "fsl,etsec2",
3732 	},
3733 	{},
3734 };
3735 MODULE_DEVICE_TABLE(of, gfar_match);
3736 
3737 /* Structure for a device driver */
3738 static struct platform_driver gfar_driver = {
3739 	.driver = {
3740 		.name = "fsl-gianfar",
3741 		.pm = GFAR_PM_OPS,
3742 		.of_match_table = gfar_match,
3743 	},
3744 	.probe = gfar_probe,
3745 	.remove = gfar_remove,
3746 };
3747 
3748 module_platform_driver(gfar_driver);
3749