// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
 * Copyright (C) 2021 NXP
 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
 */

#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/processor.h>
#include <linux/property.h>

#define PHY_ID_TJA_1103			0x001BB010

#define PMAPMD_B100T1_PMAPMD_CTL	0x0834
#define B100T1_PMAPMD_CONFIG_EN		BIT(15)
#define B100T1_PMAPMD_MASTER		BIT(14)
#define MASTER_MODE			(B100T1_PMAPMD_CONFIG_EN | \
					 B100T1_PMAPMD_MASTER)
#define SLAVE_MODE			(B100T1_PMAPMD_CONFIG_EN)

#define VEND1_DEVICE_CONTROL		0x0040
#define DEVICE_CONTROL_RESET		BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
#define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)

#define VEND1_PHY_IRQ_ACK		0x80A0
#define VEND1_PHY_IRQ_EN		0x80A1
#define VEND1_PHY_IRQ_STATUS		0x80A2
#define PHY_IRQ_LINK_EVENT		BIT(1)

#define VEND1_PHY_CONTROL		0x8100
#define PHY_CONFIG_EN			BIT(14)
#define PHY_START_OP			BIT(0)

#define VEND1_PHY_CONFIG		0x8108
#define PHY_CONFIG_AUTO			BIT(0)

#define VEND1_SIGNAL_QUALITY		0x8320
#define SQI_VALID			BIT(14)
#define SQI_MASK			GENMASK(2, 0)
#define MAX_SQI				SQI_MASK

#define VEND1_CABLE_TEST		0x8330
#define CABLE_TEST_ENABLE		BIT(15)
#define CABLE_TEST_START		BIT(14)
#define CABLE_TEST_VALID		BIT(13)
#define CABLE_TEST_OK			0x00
#define CABLE_TEST_SHORTED		0x01
#define CABLE_TEST_OPEN			0x02
#define CABLE_TEST_UNKNOWN		0x07

#define VEND1_PORT_CONTROL		0x8040
#define PORT_CONTROL_EN			BIT(14)

#define VEND1_PORT_INFRA_CONTROL	0xAC00
#define PORT_INFRA_CONTROL_EN		BIT(14)

#define VEND1_RXID			0xAFCC
#define VEND1_TXID			0xAFCD
#define ID_ENABLE			BIT(15)

#define VEND1_ABILITIES			0xAFC4
#define RGMII_ID_ABILITY		BIT(15)
#define RGMII_ABILITY			BIT(14)
#define RMII_ABILITY			BIT(10)
#define REVMII_ABILITY			BIT(9)
#define MII_ABILITY			BIT(8)
#define SGMII_ABILITY			BIT(0)

#define VEND1_MII_BASIC_CONFIG		0xAFC6
#define MII_BASIC_CONFIG_REV		BIT(8)
#define MII_BASIC_CONFIG_SGMII		0x9
#define MII_BASIC_CONFIG_RGMII		0x7
#define MII_BASIC_CONFIG_RMII		0x5
#define MII_BASIC_CONFIG_MII		0x4

#define VEND1_SYMBOL_ERROR_COUNTER	0x8350
#define VEND1_LINK_DROP_COUNTER		0x8352
#define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
#define VEND1_R_GOOD_FRAME_CNT		0xA950
#define VEND1_R_BAD_FRAME_CNT		0xA952
#define VEND1_R_RXER_FRAME_CNT		0xA954
#define VEND1_RX_PREAMBLE_COUNT		0xAFCE
#define VEND1_TX_PREAMBLE_COUNT		0xAFCF
#define VEND1_RX_IPG_LENGTH		0xAFD0
#define VEND1_TX_IPG_LENGTH		0xAFD1
#define COUNTER_EN			BIT(15)

#define RGMII_PERIOD_PS			8000U
#define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS			1644U
#define MAX_ID_PS			2260U
#define DEFAULT_ID_PS			2000U

struct nxp_c45_phy {
	u32 tx_delay;
	u32 rx_delay;
};

struct nxp_c45_phy_stats {
	const char	*name;
	u8		mmd;
	u16		reg;
	u8		off;
	u16		mask;
};

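/* Vendor-specific hardware counters exposed through ethtool. Each entry
 * records the MMD, the register address and the bit field (offset and mask)
 * that holds the counter value.
 */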
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};

static int nxp_c45_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(nxp_c45_hw_stats);
}

static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		strncpy(data + i * ETH_GSTRING_LEN,
			nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
	}
}

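/* Read every counter register and extract the relevant bit field; a failed
 * MDIO read is reported as U64_MAX so the slot is still filled.
 */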
static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	size_t i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
				   nxp_c45_hw_stats[i].reg);
		if (ret < 0) {
			data[i] = U64_MAX;
		} else {
			data[i] = ret & nxp_c45_hw_stats[i].mask;
			data[i] >>= nxp_c45_hw_stats[i].off;
		}
	}
}

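/* Unlock the configuration registers: set the global/all config enable bits
 * in DEVICE_CONTROL, then enable the port, PHY and port infrastructure
 * control blocks so the following configuration writes take effect.
 */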
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}

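/* Leave the configuration phase and start normal PHY operation. */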
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}

static int nxp_c45_config_intr(struct phy_device *phydev)
{
	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
	else
		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}

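/* Acknowledge link event interrupts and kick the PHY state machine so the
 * link change is processed.
 */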
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	irqreturn_t ret = IRQ_NONE;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	return ret;
}

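/* Request a device reset and poll until the self-clearing reset bit drops. */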
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}

static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}

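/* Poll the cable test register and, once the result is valid, translate the
 * hardware result code into the ethtool cable test result. The test is then
 * disabled and normal operation is restarted.
 */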
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	int ret;
	u8 cable_test_result;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
	if (!(ret & CABLE_TEST_VALID)) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = ret & GENMASK(2, 0);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			   CABLE_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}

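/* Write the 100BASE-T1 master/slave role requested through ethtool; the
 * preferred settings are handled the same way as the forced ones.
 */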
static int nxp_c45_setup_master_slave(struct phy_device *phydev)
{
	switch (phydev->master_slave_set) {
	case MASTER_SLAVE_CFG_MASTER_FORCE:
	case MASTER_SLAVE_CFG_MASTER_PREFERRED:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      MASTER_MODE);
		break;
	case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
	case MASTER_SLAVE_CFG_SLAVE_FORCE:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      SLAVE_MODE);
		break;
	case MASTER_SLAVE_CFG_UNKNOWN:
	case MASTER_SLAVE_CFG_UNSUPPORTED:
		return 0;
	default:
		phydev_warn(phydev, "Unsupported Master/Slave mode\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nxp_c45_read_master_slave(struct phy_device *phydev)
{
	int reg;

	phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
	phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;

	reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
	if (reg < 0)
		return reg;

	if (reg & B100T1_PMAPMD_MASTER) {
		phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
	} else {
		phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
	}

	return 0;
}

static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}

static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	ret = nxp_c45_read_master_slave(phydev);
	if (ret)
		return ret;

	return 0;
}

static int nxp_c45_get_sqi(struct phy_device *phydev)
{
	int reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
	if (!(reg & SQI_VALID))
		return -EINVAL;

	reg &= SQI_MASK;

	return reg;
}

static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}

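/* RGMII internal delays must stay within the range the PHY can generate. */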
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay < MIN_ID_PS) {
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
		return -EINVAL;
	}

	if (delay > MAX_ID_PS) {
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
		return -EINVAL;
	}

	return 0;
}

static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
	 * To avoid floating point operations we'll multiply by 10
	 * and get 1 decimal point precision.
	 */
	phase_offset_raw *= 10;
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}

static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}

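/* Program the TX/RX internal delay lines for the RGMII ID modes. The delay in
 * picoseconds is first converted to degrees of the 8 ns RGMII clock period
 * and then to the raw phase offset expected by the hardware. For example,
 * the default 2000 ps maps to 2000 / 22 = 90 degrees, which encodes as
 * (90 * 10 - 738) / 9 = 18.
 */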
static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}

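/* Read the "tx-internal-delay-ps"/"rx-internal-delay-ps" properties for the
 * RGMII ID modes, fall back to the 2000 ps default when a property is
 * missing, and validate the values.
 */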
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}

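/* Check the requested MAC interface mode against the abilities advertised by
 * the PHY and program the matching MII basic configuration, including the
 * RGMII internal delays where applicable.
 */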
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

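/* Unlock the configuration registers, enable the statistics counters,
 * configure the MAC interface mode and finally start normal operation with
 * autonegotiation disabled.
 */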
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	phydev->autoneg = AUTONEG_DISABLE;

	return nxp_c45_start_op(phydev);
}

static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	phydev->priv = priv;

	return 0;
}

static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= nxp_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= nxp_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
	},
};

module_phy_driver(nxp_c45_driver);

static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");