// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 */

#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"

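/* Direct MMIO write helpers for the ENET CSR, ring-interface and
 * diagnostic CSR register blocks.
 */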
static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

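/* Indirect register write: latch the target address, write the data,
 * trigger the WR command and poll the command-done register for up to
 * ~10us.  Returns false if the write did not complete in time.
 */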
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

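/* Write a PCS register through the indirect access window */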
static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
			      u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

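/* Indirect register read: latch the target address, trigger the RD
 * command, poll for completion, then fetch the data.  Returns false
 * on timeout.
 */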
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
			      u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	bool success;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
	if (!success)
		netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
			   rd_addr);

	return success;
}

static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

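/* Release the ENET block RAMs from shutdown and wait for all memories
 * to report ready.
 */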
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

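/* Read the Rx (ICM) and Tx (ECM) drop counters, which share a single
 * register.
 */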
static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				     u32 *rx, u32 *tx)
{
	u32 count;

	xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
	xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
}

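/* Pulse the MAC reset bit to reset the XGMAC */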
static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}

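/* Toggle the reset bit in the PCS control register */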
static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
{
	u32 data;

	if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
		return;

	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
}

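/* Program the station MAC address: bytes 0-3 go into the LSW register,
 * bytes 4-5 into the upper half of the MSW register.
 */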
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}

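/* Set one of the four TSO MSS values.  Two 16-bit MSS fields are packed
 * per 32-bit register, so read-modify-write the field selected by index.
 */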
static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
				u16 mss, u8 index)
{
	u8 offset;
	u32 data;

	offset = (index < 2) ? 0 : 4;
	xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);

	if (!(index & 0x1))
		data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
			SET_VAL(TSO_MSS0, mss);
	else
		data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);

	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}

static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
			  ((((size + 2) >> 2) << 16) | size));
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);

	return data;
}

static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
					bool enable)
{
	u32 data;

	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);

	if (enable)
		data |= HSTTCTLEN;
	else
		data &= ~HSTTCTLEN;

	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);

	if (enable)
		data |= HSTRCTLEN;
	else
		data &= ~HSTRCTLEN;

	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
}

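/* One-time XGMAC/port initialization: reset the MAC, program the MAC
 * address, apply the FIFO-threshold errata workarounds and configure
 * HW pause-frame generation and flow control.
 */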
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_xgmac_reset(pdata);

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	data |= HSTPPEN;
	data &= ~HSTLENCHK;
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

	xgene_xgmac_set_mac_addr(pdata);

	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	/* Errata 10GE_1 - FIFO threshold default value incorrect */
	RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);

	/* Errata 10GE_1 - FIFO threshold default value incorrect */
	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
	RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);

	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
	data |= BIT(12);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);

	/* Configure HW pause frame generation */
	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
	data = (DEF_QUANTA << 16) | (data & 0xFFFF);
	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);

	if (pdata->enet_id != XGENE_ENET1) {
		xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
		xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
	}

	data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
	xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);

	xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
}

static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}

static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}

static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}

static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}

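/* Reset the port: cycle the ENET clock (DT boot) or invoke the ACPI
 * _RST method, falling back to _INI, then re-init the block RAMs and
 * ring associations.
 */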
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		acpi_status status;

		status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					      "_RST", NULL, NULL);
		if (ACPI_FAILURE(status)) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

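/* Enable classifier-engine bypass so received frames are steered
 * directly to the given destination ring and buffer pools.
 */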
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
				    u32 dst_ring_num, u16 bufpool_id,
				    u16 nxtbufpool_id)
{
	u32 cb, fpsel, nxtfpsel;

	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
	pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

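/* Reset the work queue or free pool associated with the given ring */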
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

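/* Look up the SFP loss-of-signal GPIO, trying the "rxlos" name first
 * and falling back to "sfp".
 */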
static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);

	if (IS_ERR(pdata->sfp_rdy))
		return -ENODEV;

	return 0;
}

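/* Delayed-work handler that polls link status, updates the carrier
 * state and resets the PCS while the link stays down.
 */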
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
					 struct xgene_enet_pdata, link_work);
	struct net_device *ndev = pdata->ndev;
	u32 link_status, poll_interval;

	link_status = xgene_enet_link_status(pdata);
	if (pdata->sfp_gpio_en && link_status &&
	    (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) &&
	    !gpiod_get_value(pdata->sfp_rdy))
		link_status = 0;

	if (link_status) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_xgmac_rx_enable(pdata);
			xgene_xgmac_tx_enable(pdata);
			netdev_info(ndev, "Link is Up - 10Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_xgmac_rx_disable(pdata);
			xgene_xgmac_tx_disable(pdata);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;

		xgene_pcs_reset(pdata);
	}

	schedule_delayed_work(&pdata->link_work, poll_interval);
}

const struct xgene_mac_ops xgene_xgmac_ops = {
	.init = xgene_xgmac_init,
	.reset = xgene_xgmac_reset,
	.rx_enable = xgene_xgmac_rx_enable,
	.tx_enable = xgene_xgmac_tx_enable,
	.rx_disable = xgene_xgmac_rx_disable,
	.tx_disable = xgene_xgmac_tx_disable,
	.set_mac_addr = xgene_xgmac_set_mac_addr,
	.set_framesize = xgene_xgmac_set_frame_size,
	.set_mss = xgene_xgmac_set_mss,
	.get_drop_cnt = xgene_xgmac_get_drop_cnt,
	.link_state = xgene_enet_link_state,
	.enable_tx_pause = xgene_xgmac_enable_tx_pause,
	.flowctl_rx = xgene_xgmac_flowctl_rx,
	.flowctl_tx = xgene_xgmac_flowctl_tx
};

const struct xgene_port_ops xgene_xgport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_xgcle_bypass,
	.shutdown = xgene_enet_shutdown,
};