// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
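
/* A quick sanity check of that bound (a sketch, not used by the code): a
 * full Clause 22 MDIO transaction is 64 bit times (32-bit preamble plus a
 * 32-bit frame), so at a 3200 Hz MDC it takes 64 / 3200 Hz = 20 ms, which
 * is exactly the HARD_ACS_RDY_POLL_NS budget above.
 */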

/**
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 * @lp: temac local
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;
	else
		return 0;
}

/**
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 * @lp: temac local
 * @reg: indirect register to read
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

/**
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 * @lp: temac local
 * @reg: indirect register to read
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete.  We really
	 * should not see timeouts, and could even end up causing
	 * problems for the following indirect access, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}

/**
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 * @lp: temac local
 * @reg: indirect register to write
 * @value: value to write
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/**
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 * @lp: temac local
 * @reg: indirect register to write
 * @value: value to write
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here.  And if it happens, we actually end up silently
	 * ignoring the write request.  Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here.  And if it happens, we continue before the write has
	 * completed.  Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
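
/* A minimal sketch of the intended locking pattern: batch several indirect
 * accesses under one lock/unlock pair.  This mirrors what e.g.
 * temac_device_reset() below does when it disables the receiver:
 *
 *	unsigned long flags;
 *	u32 val;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
 *	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
 *				    val & ~XTE_RXC1_RXEN_MASK);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 */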

/**
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/**
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out32.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
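
/* For example (illustrative only): each DCR register is one 32-bit word,
 * so a word index such as RX_CURDESC_PTR from ll_temac.h lands at byte
 * offset RX_CURDESC_PTR << 2 from lp->sdma_regs in the memory mapped case.
 */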

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	return -1;
}

#endif

/**
 * temac_dma_bd_release - Release buffer descriptor rings
 * @ndev: network device
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < RX_BD_NUM; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent,
				 be32_to_cpu(lp->rx_bd_v[i].phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 * @ndev: network device
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
				  GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	/* Link each descriptor's "next" pointer to the following one,
	 * wrapping at the end to form a ring.
	 */
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
				+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
				+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_next = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* set up the unicast MAC address filter with the device's address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
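
/* Worked example (hypothetical address, not from the code): for a MAC of
 * 00:11:22:33:44:55, the writes above produce
 *
 *	UAW0 = 0x33221100	(dev_addr[0] in bits 7:0 ... dev_addr[3] in 31:24)
 *	UAW1 = 0x00005544	(dev_addr[4] in bits 7:0, dev_addr[5] in 15:8)
 *
 * i.e. the address bytes are packed into the two registers lowest byte
 * first.
 */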

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	ether_addr_copy(ndev->dev_addr, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
	temac_do_set_mac_address(ndev);
	return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
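
/* Worked example (hypothetical values): storing 01:00:5e:00:00:01 in CAM
 * entry 2 with the sequence above gives
 *
 *	MAW0 = 0x005e0001	(ha->addr[0] in bits 7:0 ... addr[3] in 31:24)
 *	MAW1 = 0x00020100	(addr[4] in 7:0, addr[5] in 15:8, entry index
 *				 in bits 23:16)
 *
 * The same MAW1 index field is used when clearing unused entries above.
 */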

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/**
 * temac_setoptions - Apply the selected XTE_OPTION_* flags to the hardware
 * @ndev: network device
 * @options: bitmask of XTE_OPTION_* flags to enable
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}
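
/* Typical use (a sketch; see temac_device_reset() below): first sync all
 * options except the transmitter/receiver enables, then enable everything
 * once the descriptor rings are ready:
 *
 *	temac_setoptions(ndev,
 *			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
 *	...
 *	temac_setoptions(ndev, lp->options);
 */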

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif
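
/* The round trip above is how the driver finds the skb to free on TX
 * completion: temac_start_xmit() stashes the skb pointer in the unused
 * app3/app4 words of the descriptor via ptr_to_txbd(), and
 * temac_start_xmit_done() recovers it with ptr_from_txbd() once the DMA
 * engine marks the descriptor complete.
 */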

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		tail++;
		if (tail >= TX_BD_NUM)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}
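
	/* Worked example (hypothetical frame, not from the code): for TCP
	 * over IPv4 with a 20-byte IP header, csum_start_off is 34 (14-byte
	 * Ethernet header plus 20-byte IP header) and skb->csum_offset is
	 * 16, so app1 = (34 << 16) | 50: start summing at byte 34 and
	 * insert the result at byte 50.
	 */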

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	cur_p->phys = cpu_to_be32(skb_dma_addr);
	ptr_to_txbd((void *)skb, cur_p);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}

static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int length;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = be32_to_cpu(cur_p->app0);
	while (bdstat & STS_CTRL_APP0_CMPLT) {
		skb = lp->rx_skb[lp->rx_bd_ci];
		length = be32_to_cpu(cur_p->app4) & 0x3FFF;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {

			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    XTE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb) {
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = be32_to_cpu(cur_p->app0);
	}
	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	if (!ndev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, rq, cmd);
}

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};

/* ethtool support */
static const struct ethtool_ops temac_ethtool_ops = {
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	struct resource *res;
	const void *addr;
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	spin_lock_init(&lp->rx_lock);

	/* Setup spinlock for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
					resource_size(res));
	if (!lp->regs) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return -ENOMEM;
	}

	/* Select register access functions with the specified
	 * endianness mode.  Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np) {
		if (of_get_property(temac_np, "little-endian", NULL))
			little_endian = true;
	} else if (pdata) {
		little_endian = pdata->reg_little_endian;
	}
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_get_property(dma_np, "little-endian", NULL)) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Use defaults for IRQ delay/coalescing setup.  These
		 * are configuration values, so they do not belong in
		 * the device tree.
		 */
		lp->tx_chnl_ctrl = 0x10220000;
		lp->rx_chnl_ctrl = 0xff070000;
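
		/* Decoding those defaults against the layout used in the
		 * platform-data path below: the timeout lives in bits 31:24
		 * and the count in bits 23:16, so 0x10220000 means a TX IRQ
		 * delay timeout of 0x10 with coalesce count 0x22, and
		 * 0xff070000 means an RX timeout of 0xff with coalesce
		 * count 7.
		 */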

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
						     resource_size(res));
		if (!lp->sdma_regs) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return -ENOMEM;
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count)
			lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
				(pdata->tx_irq_count << 16);
		else
			lp->tx_chnl_ctrl = 0x10220000;
		if (pdata->rx_irq_timeout || pdata->rx_irq_count)
			lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
				(pdata->rx_irq_count << 16);
		else
			lp->rx_chnl_ctrl = 0xff070000;
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0) {
		if (lp->rx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA RX irq\n");
		return lp->rx_irq;
	}
	if (lp->tx_irq < 0) {
		if (lp->tx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA TX irq\n");
		return lp->tx_irq;
	}

	if (temac_np) {
		/* Retrieve the MAC address */
		addr = of_get_mac_address(temac_np);
		if (IS_ERR(addr)) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}

static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}

static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");