// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */
#include <linux/ptp_classify.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define SPARX5_MAX_PTP_ID	512

#define TOD_ACC_PIN		0x4

enum {
	PTP_PIN_ACTION_IDLE = 0,
	PTP_PIN_ACTION_LOAD,
	PTP_PIN_ACTION_SAVE,
	PTP_PIN_ACTION_CLOCK,
	PTP_PIN_ACTION_DELTA,
	PTP_PIN_ACTION_TOD
};

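/* Worked example, derived from the constants below: at 250 MHz the nominal
 * TOD increment is 3.9921875 ns per clock cycle (see
 * sparx5_ptp_get_nominal_value()), so 1 ppm of that expressed in 2^-59 ns
 * units is 3.9921875e-6 * 2^59 = 2301339409586.3..., which truncates to the
 * 2301339409586 returned for SPX5_CORE_CLOCK_250MHZ.
 */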
static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
{
	/* Represents a 1 ppm adjustment in 2^59 format, using the nominal
	 * per-cycle increments 1.59687500000 (625 MHz), 1.99609375000
	 * (500 MHz) and 3.99218750000 (250 MHz) ns as reference.
	 * The value is calculated as follows:
	 * (1/1000000)/((2^-59)/X)
	 */

	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 2301339409586;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 1150669704793;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 920535763834;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

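/* For reference, these constants are the per-cycle TOD increments in
 * 2^-59 ns units:
 *   0x1FF0000000000000 * 2^-59 = 3.9921875  ns (250 MHz)
 *   0x0FF8000000000000 * 2^-59 = 1.99609375 ns (500 MHz)
 *   0x0CC6666666666666 * 2^-59 ~ 1.596875   ns (625 MHz)
 * matching the reference values quoted in sparx5_ptp_get_1ppm().
 */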
static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
{
	u64 res = 0;

	switch (sparx5->coreclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		res = 0x1FF0000000000000;
		break;
	case SPX5_CORE_CLOCK_500MHZ:
		res = 0x0FF8000000000000;
		break;
	case SPX5_CORE_CLOCK_625MHZ:
		res = 0x0CC6666666666666;
		break;
	default:
		WARN(1, "Invalid core clock");
		break;
	}

	return res;
}

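/* Note that the TX timestamping mode selected here is stored per port
 * (port->ptp_cmd), while the resulting hwtstamp configuration is kept in the
 * SPARX5_PHC_PORT clock domain and reported back by
 * sparx5_ptp_hwtstamp_get().
 */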
int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	/* For now, don't allow running PTP on ports that are part of a
	 * bridge: in the transparent clock case the HW would still forward
	 * the frames, resulting in duplicate frames.
	 */

	if (test_bit(port->portno, sparx5->bridge_mask))
		return -EINVAL;

	switch (cfg->tx_type) {
	case HWTSTAMP_TX_ON:
		port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
		break;
	case HWTSTAMP_TX_OFF:
		port->ptp_cmd = IFH_REW_OP_NOOP;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* Commit back the result & save it */
	mutex_lock(&sparx5->ptp_lock);
	phc = &sparx5->phc[SPARX5_PHC_PORT];
	phc->hwtstamp_config = *cfg;
	mutex_unlock(&sparx5->ptp_lock);

	return 0;
}

void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
			     struct kernel_hwtstamp_config *cfg)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_phc *phc;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	*cfg = phc->hwtstamp_config;
}

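/* In IEEE 1588 the low nibble of the messageType field is 0 for Sync
 * messages, so the (msgtype & 0xf) == 0 check below selects one-step
 * rewriting only for Sync frames; all other event messages fall back to
 * two-step.
 */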
static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
				u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
{
	struct ptp_header *header;
	u8 msgtype;
	int type;

	if (port->ptp_cmd == IFH_REW_OP_NOOP) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	header = ptp_parse_header(skb, type);
	if (!header) {
		*rew_op = IFH_REW_OP_NOOP;
		*pdu_type = IFH_PDU_TYPE_NONE;
		*pdu_w16_offset = 0;
		return;
	}

	*pdu_w16_offset = 7;
	if (type & PTP_CLASS_L2)
		*pdu_type = IFH_PDU_TYPE_PTP;
	if (type & PTP_CLASS_IPV4)
		*pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
	if (type & PTP_CLASS_IPV6)
		*pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;

	if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
		*rew_op = IFH_REW_OP_TWO_STEP_PTP;
		return;
	}

	/* If it is a Sync message and the port runs in one-step mode, use
	 * the one-step operation; otherwise fall back to two-step.
	 */
	msgtype = ptp_get_msgtype(header, type);
	if ((msgtype & 0xf) == 0) {
		*rew_op = IFH_REW_OP_ONE_STEP_PTP;
		return;
	}

	*rew_op = IFH_REW_OP_TWO_STEP_PTP;
}

static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
{
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;

	spin_lock_irqsave(&port->tx_skbs.lock, flags);
	skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
		if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
			       jiffies))
			break;

		__skb_unlink(skb, &port->tx_skbs);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
}

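/* For two-step timestamping the skb is queued on port->tx_skbs together with
 * a timestamp ID (port->ts_id); sparx5_ptp_irq_handler() later matches that
 * ID against the one read back from the timestamp FIFO to find the right skb.
 * SPARX5_MAX_PTP_ID bounds the number of outstanding two-step frames.
 */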
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
				struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 rew_op, pdu_type, pdu_w16_offset;
	unsigned long flags;

	sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
	SPARX5_SKB_CB(skb)->rew_op = rew_op;
	SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
	SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;

	if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
		return 0;

	sparx5_ptp_txtstamp_old_release(port);

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
		spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
		return -EBUSY;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_queue_tail(&port->tx_skbs, skb);
	SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
	SPARX5_SKB_CB(skb)->jiffies = jiffies;

	sparx5->ptp_skbs++;
	port->ts_id++;
	if (port->ts_id == SPARX5_MAX_PTP_ID)
		port->ts_id = 0;

	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);

	return 0;
}

void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
				 struct sk_buff *skb)
{
	struct sparx5 *sparx5 = port->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
	port->ts_id--;
	sparx5->ptp_skbs--;
	skb_unlink(skb, &port->tx_skbs);
	spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}

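/* Only the nanosecond part of the TX timestamp is provided by the hardware;
 * the seconds are taken from the current PTP time. If the current nanosecond
 * counter is already below the captured value (e.g. stamped at 999999990 ns,
 * read back at 10 ns), the seconds counter has rolled over in between and is
 * decremented by one.
 */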
static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
				   struct timespec64 *ts,
				   u32 nsec)
{
	/* Read current PTP time to get seconds */
	unsigned long flags;
	u32 curr_nsec;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	ts->tv_nsec = nsec;

	/* The seconds counter has incremented since the ts was captured */
	if (curr_nsec < nsec)
		ts->tv_sec--;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}

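/* Each two-step TX timestamp appears to occupy two consecutive FIFO entries:
 * the first holds the nanosecond timestamp, the second holds the timestamp
 * ID (read from the STAMP and STAMP_SUBNS registers), which is why PTP_NXT
 * is advanced twice per loop iteration below.
 */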
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
	int budget = SPARX5_MAX_PTP_ID;
	struct sparx5 *sparx5 = args;

	while (budget--) {
		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sparx5_port *port;
		struct timespec64 ts;
		unsigned long flags;
		u32 val, id, txport;
		u32 delay;

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);

		if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
			continue;

		/* Retrieve the ts Tx port */
		txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);

		/* Retrieve its associated skb */
		port = sparx5->ports[txport];

		/* Retrieve the delay */
		delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);

		/* Get the next entry from the FIFO, which must be the RX
		 * timestamp that carries the ID of the frame
		 */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);

		/* Check if a timestamp can be retrieved */
		if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
			break;

		/* Read the RX timestamp registers to get the ID */
		id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
		id <<= 8;
		id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);

		spin_lock_irqsave(&port->tx_skbs.lock, flags);
		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
			if (SPARX5_SKB_CB(skb)->ts_id != id)
				continue;

			__skb_unlink(skb, &port->tx_skbs);
			skb_match = skb;
			break;
		}
		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

		/* Next ts */
		spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
			 REW_PTP_TWOSTEP_CTRL_PTP_NXT,
			 sparx5, REW_PTP_TWOSTEP_CTRL);

		if (WARN_ON(!skb_match))
			continue;

		spin_lock(&sparx5->ptp_ts_id_lock);
		sparx5->ptp_skbs--;
		spin_unlock(&sparx5->ptp_ts_id_lock);

		/* Get the h/w timestamp */
		sparx5_get_hwtimestamp(sparx5, &ts, delay);

		/* Set the timestamp in the skb */
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb_match, &shhwtstamps);

		dev_kfree_skb_any(skb_match);
	}

	return IRQ_HANDLED;
}

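/* Worked example, assuming a 500 MHz core clock: scaled_ppm is ppm with a
 * 16-bit fractional part, so scaled_ppm = 0x18000 means +1.5 ppm. The split
 * computation below gives
 *   ref = 1150669704793 * 1 + ((1150669704793 * 0x8000) >> 16)
 *       = 1150669704793 + 575334852396 = 1726004557189
 * which is then added to (or subtracted from) the nominal TOD increment.
 */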
static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	bool neg_adj = false;
	u64 tod_inc;
	u64 ref;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	tod_inc = sparx5_ptp_get_nominal_value(sparx5);

	/* The multiplication is split into two separate additions to avoid
	 * overflow: multiplying the full scaled_ppm (ppm with a 16-bit
	 * fractional part) in one go would overflow for adjustments larger
	 * than about 20 ppm.
	 */
	ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
	ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
	tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
		PTP_CLK_PER_CFG(phc->index, 0));
	spx5_wr((u32)(tod_inc >> 32), sparx5,
		PTP_CLK_PER_CFG(phc->index, 1));

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
		 PTP_PTP_DOM_CFG);

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	/* Must be in IDLE mode before the time can be loaded */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	/* Set new value */
	spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
		sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	spx5_wr(lower_32_bits(ts->tv_sec),
		sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

	/* Apply new values */
	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	return 0;
}

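/* After a SAVE action the nanosecond register can apparently report a small
 * negative value encoded as 0x3FFFFFF0..0x3FFFFFFF (i.e. -16..-1 ns). The
 * check below folds this into the previous second: e.g. a raw value of
 * 0x3FFFFFFF keeps the low nibble (15) and adds 999999984 (10^9 - 16),
 * yielding 999999999 ns of the preceding second.
 */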
int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;
	unsigned long flags;
	time64_t s;
	s64 ns;

	spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

	spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
		 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
		 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
		 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
		 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

	s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
	s <<= 32;
	s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
	ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
	ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;

	spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);

	/* Deal with negative values */
	if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		s--;
		ns &= 0xf;
		ns += 999999984;
	}

	set_normalized_timespec64(ts, s, ns);
	return 0;
}

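/* Offsets smaller than half a second are applied atomically through the
 * DELTA pin action (the offset has to fit in PTP_TOD_NSEC); larger offsets
 * fall back to a non-atomic gettime64 + settime64 sequence, which is why
 * that path is described as not exact.
 */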
static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
	struct sparx5 *sparx5 = phc->sparx5;

	if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
		unsigned long flags;

		spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);

		/* Must be in IDLE mode before the time can be loaded */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
			sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));

		/* Adjust time with the value of PTP_TOD_NSEC */
		spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
			 PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
			 PTP_PTP_PIN_CFG_PTP_PIN_DOM |
			 PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
			 sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));

		spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
	} else {
		/* Fall back to sparx5_ptp_settime64(), which is not exact */
		struct timespec64 ts;
		u64 now;

		sparx5_ptp_gettime64(ptp, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		sparx5_ptp_settime64(ptp, &ts);
	}

	return 0;
}

static struct ptp_clock_info sparx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "sparx5 ptp",
	.max_adj	= 200000,
	.gettime64	= sparx5_ptp_gettime64,
	.settime64	= sparx5_ptp_settime64,
	.adjtime	= sparx5_ptp_adjtime,
	.adjfine	= sparx5_ptp_adjfine,
};

static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
			       int index,
			       struct ptp_clock_info *clock_info)
{
	struct sparx5_phc *phc = &sparx5->phc[index];

	phc->info = *clock_info;
	phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	phc->index = index;
	phc->sparx5 = sparx5;

	/* PTP Rx stamping is always enabled. */
	phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	return 0;
}

int sparx5_ptp_init(struct sparx5 *sparx5)
{
	u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
	struct sparx5_port *port;
	int err, i;

	if (!sparx5->ptp)
		return 0;

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
		if (err)
			return err;
	}

	spin_lock_init(&sparx5->ptp_clock_lock);
	spin_lock_init(&sparx5->ptp_ts_id_lock);
	mutex_init(&sparx5->ptp_lock);

	/* Disable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);

	/* Configure the nominal TOD increment per clock cycle */
	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
		spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
			PTP_CLK_PER_CFG(i, 0));
		spx5_wr((u32)(tod_adj >> 32), sparx5,
			PTP_CLK_PER_CFG(i, 1));
	}

	spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
		 PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
		 sparx5, PTP_PTP_DOM_CFG);

	/* Enable master counters */
	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);

	for (i = 0; i < SPX5_PORTS; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_head_init(&port->tx_skbs);
	}

	return 0;
}

void sparx5_ptp_deinit(struct sparx5 *sparx5)
{
	struct sparx5_port *port;
	int i;

	for (i = 0; i < SPX5_PORTS; i++) {
		port = sparx5->ports[i];
		if (!port)
			continue;

		skb_queue_purge(&port->tx_skbs);
	}

	for (i = 0; i < SPARX5_PHC_COUNT; ++i)
		ptp_clock_unregister(sparx5->phc[i].clock);
}

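/* RX frames carry only the nanosecond part of the timestamp, so the full
 * time is rebuilt from the current PTP seconds; as in
 * sparx5_get_hwtimestamp(), tv_sec is decremented when the nanosecond
 * counter has already wrapped past the frame's value.
 */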
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
			 u64 timestamp)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sparx5_phc *phc;
	struct timespec64 ts;
	u64 full_ts_in_ns;

	if (!sparx5->ptp)
		return;

	phc = &sparx5->phc[SPARX5_PHC_PORT];
	sparx5_ptp_gettime64(&phc->info, &ts);

	if (ts.tv_nsec < timestamp)
		ts.tv_sec--;
	ts.tv_nsec = timestamp;
	full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = full_ts_in_ns;
}