// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */
#include <linux/ptp_classify.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define SPARX5_MAX_PTP_ID	512

#define TOD_ACC_PIN		0x4

enum {
	PTP_PIN_ACTION_IDLE = 0,
	PTP_PIN_ACTION_LOAD,
	PTP_PIN_ACTION_SAVE,
	PTP_PIN_ACTION_CLOCK,
	PTP_PIN_ACTION_DELTA,
	PTP_PIN_ACTION_TOD
};

static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
{
	/* Represents a 1 ppm adjustment in 2^59 fixed-point format, using
	 * the nominal TOD increments 1.59687500000 ns (625 MHz),
	 * 1.99609375000 ns (500 MHz) and 3.99218750000 ns (250 MHz) as
	 * reference. The value is calculated as:
	 * (1/1000000)/((2^-59)/X) = X * 2^59 / 10^6
	 * e.g. for 250 MHz: 3.99218750000e-6 * 2^59 ~= 2301339409586
	 */
	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 2301339409586;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 1150669704793;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 920535763834;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

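/* Nominal TOD increment per core clock cycle, in 2^59 fixed-point ns:
 * ~3.9921875 ns at 250 MHz, ~1.99609375 ns at 500 MHz and ~1.596875 ns at
 * 625 MHz, matching the references used in sparx5_ptp_get_1ppm().
 */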
static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
{
	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 0x1FF0000000000000;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 0x0FF8000000000000;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 0x0CC6666666666666;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

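/* Handle the hwtstamp set ioctl: select the rewriter operation used for Tx
 * timestamping (off, one-step or two-step), normalize the Rx filter and
 * store the resulting config in the port domain PHC.
 */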
int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct hwtstamp_config cfg;
	struct sparx5_phc *phc;

	/* For now, don't allow PTP on ports that are part of a bridge: with a
	 * transparent clock the HW would still forward the frames, which
	 * would result in duplicates.
	 */

	if (test_bit(port->portno, sparx5->bridge_mask))
		return -EINVAL;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ON:
		port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
		break;
	case HWTSTAMP_TX_OFF:
		port->ptp_cmd = IFH_REW_OP_NOOP;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Commit back the result & save it */
	mutex_lock(&sparx5->ptp_lock);
	phc = &sparx5->phc[SPARX5_PHC_PORT];
	memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
	mutex_unlock(&sparx5->ptp_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
			    sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
}

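/* Classify an outgoing frame for timestamping: derive the rewriter operation
 * (none, one-step or two-step), the PDU type (L2/IPv4/IPv6 PTP) and the
 * pdu_w16_offset value that is carried in the injection frame header.
 */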
static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
				u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
{
	struct ptp_header *header;
	u8 msgtype;
	int type;

	if (port->ptp_cmd == IFH_REW_OP_NOOP) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	header = ptp_parse_header(skb, type);
	if (!header) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	*pdu_w16_offset = 7;
	if (type & PTP_CLASS_L2)
		*pdu_type = IFH_PDU_TYPE_PTP;
	if (type & PTP_CLASS_IPV4)
		*pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
	if (type & PTP_CLASS_IPV6)
		*pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;

	if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
		*rew_op = IFH_REW_OP_TWO_STEP_PTP;
		return;
	}

	/* If it is a sync message and one-step is configured, use the
	 * one-step operation; otherwise fall back to two-step.
	 */
	msgtype = ptp_get_msgtype(header, type);
	if ((msgtype & 0xf) == 0) {
		*rew_op = IFH_REW_OP_ONE_STEP_PTP;
		return;
	}

	*rew_op = IFH_REW_OP_TWO_STEP_PTP;
}

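/* Drop queued two-step Tx skbs whose hardware timestamp never arrived within
 * SPARX5_PTP_TIMEOUT, so the Tx queue cannot grow without bound.
 */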
static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
{
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;

	spin_lock_irqsave(&port->tx_skbs.lock, flags);
	skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
		if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
			       jiffies))
			break;

		__skb_unlink(skb, &port->tx_skbs);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
}

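/* Called in the transmit path: classify the frame and, for two-step
 * timestamping, assign a timestamp ID and queue the skb until the timestamp
 * FIFO interrupt delivers the matching Tx timestamp. Returns -EBUSY when all
 * SPARX5_MAX_PTP_ID slots are in use.
 */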
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
				struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 rew_op, pdu_type, pdu_w16_offset;
	unsigned long flags;

	sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
	SPARX5_SKB_CB(skb)->rew_op = rew_op;
	SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
	SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;

	if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
		return 0;

	sparx5_ptp_txtstamp_old_release(port);

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
		spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
		return -EBUSY;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_queue_tail(&port->tx_skbs, skb);
	SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
	SPARX5_SKB_CB(skb)->jiffies = jiffies;

	sparx5->ptp_skbs++;
	port->ts_id++;
	if (port->ts_id == SPARX5_MAX_PTP_ID)
		port->ts_id = 0;

	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);

	return 0;
}

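/* Undo a two-step timestamp request, e.g. when the frame could not be
 * injected after all.
 */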
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
				 struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	port->ts_id--;
	sparx5->ptp_skbs--;
	skb_unlink(skb, &port->tx_skbs);
	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}

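/* Build a full timespec64 from the nanosecond value captured by the
 * hardware: read the current PTP time to get the seconds and compensate if
 * the seconds counter has incremented since the nanoseconds were captured.
 */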
static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
				   struct timespec64 *ts,
				   u32 nsec)
{
	/* Read current PTP time to get seconds */
	unsigned long flags;
	u32 curr_nsec;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	ts->tv_nsec = nsec;

	/* The seconds counter has incremented since the nsec was captured */
	if (curr_nsec < nsec)
		ts->tv_sec--;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}

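/* Two-step timestamp FIFO interrupt handler: each FIFO entry pair holds the
 * Tx timestamp and the timestamp ID of the frame. The ID is matched against
 * the queued Tx skbs so the timestamp can be delivered via skb_tstamp_tx().
 */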
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
	int budget = SPARX5_MAX_PTP_ID;
	struct sparx5 *sparx5 = args;

	while (budget--) {
		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sparx5_port *port;
		struct timespec64 ts;
		unsigned long flags;
		u32 val, id, txport;
		u32 delay;

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);

		if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
			continue;

		/* Retrieve the ts Tx port */
		txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);

		/* Retrieve its associated skb */
		port = sparx5->ports[txport];

		/* Retrieve the delay */
		delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);

		/* Get the next timestamp from the FIFO, which needs to be the
		 * Rx timestamp that carries the ID of the frame
		 */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		/* Read the RX timestamp to get the ID */
		id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		id <<= 8;
		id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);

		spin_lock_irqsave(&port->tx_skbs.lock, flags);
		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
			if (SPARX5_SKB_CB(skb)->ts_id != id)
				continue;

			__skb_unlink(skb, &port->tx_skbs);
			skb_match = skb;
			break;
		}
		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

		/* Next ts */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		if (WARN_ON(!skb_match))
			continue;

		spin_lock(&sparx5->ptp_ts_id_lock);
		sparx5->ptp_skbs--;
		spin_unlock(&sparx5->ptp_ts_id_lock);

		/* Get the h/w timestamp */
		sparx5_get_hwtimestamp(sparx5, &ts, delay);

		/* Set the timestamp into the skb */
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb_match, &shhwtstamps);

		dev_kfree_skb_any(skb_match);
	}

	return IRQ_HANDLED;
}

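/* Adjust the clock rate by scaled_ppm (ppm with a 16-bit fractional part):
 * ref = 1ppm * (scaled_ppm >> 16) + (1ppm * (scaled_ppm & 0xffff)) >> 16
 * is added to (or subtracted from) the nominal TOD increment, which is then
 * written to PTP_CLK_PER_CFG for this time domain.
 */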
static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	bool neg_adj = false;
	u64 tod_inc;
	u64 ref;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	tod_inc = sparx5_ptp_get_nominal_value(sparx5);

	/* The multiplication is split into two parts and the results added,
	 * to avoid overflow: multiplying by the full scaled_ppm (ppm with a
	 * 16-bit fractional part) would overflow for adjustments larger than
	 * about 20 ppm.
	 */
	ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
	ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
	tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
		PTP_CLK_PER_CFG(phc->index, 0));
	spx5_wr((u32)(tod_inc >> 32), sparx5,
		PTP_CLK_PER_CFG(phc->index, 1));

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
		 PTP_PTP_DOM_CFG);

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

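/* Set the PHC time by loading a new TOD value through the TOD access pin. */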
static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	/* Must be in IDLE mode before the time can be loaded */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	/* Set new value */
	spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
		sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	spx5_wr(lower_32_bits(ts->tv_sec),
		sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	/* Apply new values */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

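/* Read the PHC time by triggering a SAVE action on the TOD access pin. */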
static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	time64_t s;
	s64 ns;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	s <<= 32;
	s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
	ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	/* Deal with negative values */
	if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		s--;
		ns &= 0xf;
		ns += 999999984;
	}

	set_normalized_timespec64(ts, s, ns);
	return 0;
}

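/* Adjust the PHC time: deltas smaller than half a second are applied through
 * a DELTA pin action, larger ones fall back to a read/modify/write of the
 * full time, which is not exact.
 */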
static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;

	if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
		unsigned long flags;

		spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

		/* Must be in IDLE mode before the time can be loaded */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
			sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

		/* Adjust time with the value of PTP_TOD_NSEC */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
	} else {
		/* Fall back to sparx5_ptp_settime64(), which is not exact */
		struct timespec64 ts;
		u64 now;

		sparx5_ptp_gettime64(ptp, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		sparx5_ptp_settime64(ptp, &ts);
	}

	return 0;
}

static struct ptp_clock_info sparx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "sparx5 ptp",
	.max_adj	= 200000,
	.gettime64	= sparx5_ptp_gettime64,
	.settime64	= sparx5_ptp_settime64,
	.adjtime	= sparx5_ptp_adjtime,
	.adjfine	= sparx5_ptp_adjfine,
};

static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
			       int index,
			       struct ptp_clock_info *clock_info)
{
	struct sparx5_phc *phc = &sparx5->phc[index];

	phc->info = *clock_info;
	phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	phc->index = index;
	phc->sparx5 = sparx5;

	/* PTP Rx stamping is always enabled */
	phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	return 0;
}

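/* Register the PHCs and program the nominal TOD increment for each time
 * domain; the master counters are kept disabled while the increments are
 * being updated.
 */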
int sparx5_ptp_init(struct sparx5 *sparx5)
{
	u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
	struct sparx5_port *port;
	int err, i;

	if (!sparx5->ptp)
		return 0;

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
		if (err)
			return err;
	}

	spin_lock_init(&sparx5->ptp_clock_lock);
	spin_lock_init(&sparx5->ptp_ts_id_lock);
	mutex_init(&sparx5->ptp_lock);

	/* Disable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);

	/* Configure the nominal TOD increment per clock cycle */
	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
			PTP_CLK_PER_CFG(i, 0));
		spx5_wr((u32)(tod_adj >> 32), sparx5,
			PTP_CLK_PER_CFG(i, 1));
	}

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	/* Enable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < sparx5->port_count; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_head_init(&port->tx_skbs);
	}

	return 0;
}

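/* Release PTP resources: flush any pending two-step Tx skbs and unregister
 * the PHCs.
 */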
void sparx5_ptp_deinit(struct sparx5 *sparx5)
{
	struct sparx5_port *port;
	int i;

	for (i = 0; i < sparx5->port_count; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_purge(&port->tx_skbs);
	}

	for (i = 0; i < SPARX5_PHC_COUNT; ++i)
		ptp_clock_unregister(sparx5->phc[i].clock);
}

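/* Attach an Rx hardware timestamp to the skb: the extracted timestamp only
 * carries nanoseconds, so the seconds are taken from the PHC and adjusted
 * if the second boundary was crossed in between.
 */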
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
			 u64 timestamp)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sparx5_phc *phc;
	struct timespec64 ts;
	u64 full_ts_in_ns;

	if (!sparx5->ptp)
		return;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	sparx5_ptp_gettime64(&phc->info, &ts);

	if (ts.tv_nsec < timestamp)
		ts.tv_sec--;
	ts.tv_nsec = timestamp;
	full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = full_ts_in_ns;
}