// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#include "mbox.h"
#include "ptp.h"
#include "rvu.h"

#define DRV_NAME				"Marvell PTP Driver"

#define PCI_DEVID_OCTEONTX2_PTP			0xA00C
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP		0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP		0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP		0xB300
#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP	0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP		0xB500
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP	0xB600
#define PCI_DEVID_OCTEONTX2_RST			0xA085
#define PCI_DEVID_CN10K_PTP			0xA09E
#define PCI_SUBSYS_DEVID_CN10K_A_PTP		0xB900
#define PCI_SUBSYS_DEVID_CNF10K_A_PTP		0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B_PTP		0xBC00

#define PCI_PTP_BAR_NO				0

#define PTP_CLOCK_CFG				0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN			BIT_ULL(0)
#define PTP_CLOCK_CFG_EXT_CLK_EN		BIT_ULL(1)
#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK		GENMASK_ULL(7, 2)
#define PTP_CLOCK_CFG_TSTMP_EDGE		BIT_ULL(9)
#define PTP_CLOCK_CFG_TSTMP_EN			BIT_ULL(8)
#define PTP_CLOCK_CFG_TSTMP_IN_MASK		GENMASK_ULL(15, 10)
#define PTP_CLOCK_CFG_ATOMIC_OP_MASK		GENMASK_ULL(28, 26)
#define PTP_CLOCK_CFG_PPS_EN			BIT_ULL(30)
#define PTP_CLOCK_CFG_PPS_INV			BIT_ULL(31)

#define PTP_PPS_HI_INCR				0xF60ULL
#define PTP_PPS_LO_INCR				0xF68ULL
#define PTP_PPS_THRESH_HI			0xF58ULL

#define PTP_CLOCK_LO				0xF08ULL
#define PTP_CLOCK_HI				0xF10ULL
#define PTP_CLOCK_COMP				0xF18ULL
#define PTP_TIMESTAMP				0xF20ULL
#define PTP_CLOCK_SEC				0xFD0ULL
#define PTP_SEC_ROLLOVER			0xFD8ULL
/* Atomic update related CSRs */
#define PTP_FRNS_TIMESTAMP			0xFE0ULL
#define PTP_NXT_ROLLOVER_SET			0xFE8ULL
#define PTP_CURR_ROLLOVER_SET			0xFF0ULL
#define PTP_NANO_TIMESTAMP			0xFF8ULL
#define PTP_SEC_TIMESTAMP			0x1000ULL

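/* scale factor used to avoid precision loss in the integer cycle-time math */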
#define CYCLE_MULT				1000

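/* silicon revision checks on the low nibble of the PCI revision ID */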
#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0)
#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1)

/* PTP atomic update operation type */
enum atomic_opcode {
	ATOMIC_SET = 1,
	ATOMIC_INC = 3,
	ATOMIC_DEC = 4
};

static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];

static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
{
	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
}

static bool is_ptp_dev_cn10ka(struct ptp *ptp)
{
	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
}

static bool cn10k_ptp_errata(struct ptp *ptp)
{
	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
		return true;

	return false;
}

static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
{
	struct ptp *ptp = rvu->ptp;

	if (is_rvu_otx2(rvu))
		return false;

	/* On older silicon variants of CN10K, the atomic update feature
	 * is not available.
	 */
	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
		return false;

	return true;
}

static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
{
	struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
	ktime_t curr_ts = ktime_get();
	ktime_t delta_ns, period_ns;
	u64 ptp_clock_hi;

	/* calculate the elapsed time since last restart */
	delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));

	/* if the ptp clock value has crossed 0.5 seconds it's too late
	 * to update the pps threshold value for this boundary, so
	 * reschedule and update the threshold after the next one
	 * second boundary.
	 */
	ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
	if (ptp_clock_hi > 500000000) {
		period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
	} else {
		writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
		period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
	}

	hrtimer_forward_now(hrtimer, period_ns);
	ptp->last_ts = curr_ts;

	return HRTIMER_RESTART;
}

static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
{
	ktime_t period_ns;

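	/* first expiry lands just past the next one second boundary of the
	 * PTP clock; the extra 100 ns presumably keeps the handler from
	 * firing ahead of the rollover.
	 */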
	period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
	hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
	ptp->last_ts = ktime_get();
}

static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
	u64 sec, sec1, nsec;
	unsigned long flags;

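	/* read the second counter on both sides of the nanosecond read so
	 * that a rollover in between can be detected and the nanosecond
	 * value re-read.
	 */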
	spin_lock_irqsave(&ptp->ptp_lock, flags);
	sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
	nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
	sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
	/* check nsec rollover */
	if (sec1 > sec) {
		nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
		sec = sec1;
	}
	spin_unlock_irqrestore(&ptp->ptp_lock, flags);

	return sec * NSEC_PER_SEC + nsec;
}

static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
{
	return readq(ptp->reg_base + PTP_CLOCK_HI);
}

static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
{
	u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
	u32 ptp_clock_nsec, cycle_time;
	int cycle;

	/* Errata:
	 * Issue #1: At the time of 1 sec rollover of the nano-second counter,
	 * the nano-second counter is set to 0. However, it should be set to
	 * (existing counter_value - 10^9).
	 *
	 * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
	 * It should roll over at 0x3B9A_CA00.
	 */

	/* calculate ptp_clock_comp value */
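	/* comp is the per-cycle increment in 32.32 fixed-point nanoseconds */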
	comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
	/* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
	cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
	/* cycles per sec */
	cycles_per_sec = ptp_clock_freq;

	/* check whether ptp nanosecond counter rolls over early */
	cycle = cycles_per_sec - 1;
	ptp_clock_nsec = (cycle * comp) >> 32;
	while (ptp_clock_nsec < NSEC_PER_SEC) {
		if (ptp_clock_nsec == 0x3B9AC9FF)
			goto calc_adj_comp;
		cycle++;
		ptp_clock_nsec = (cycle * comp) >> 32;
	}
	/* compute nanoseconds lost per second when nsec counter rolls over */
	ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
	/* calculate ptp_clock_comp adjustment */
	if (ns_drift > 0) {
		adj = comp * ns_drift;
		adj = adj / 1000000000ULL;
	}
	/* speed up the ptp clock to account for nanoseconds lost */
	comp += adj;
	return comp;

calc_adj_comp:
	/* slow down the ptp clock so it does not roll over early */
	adj = comp * cycle_time;
	adj = adj / 1000000000ULL;
	adj = adj / CYCLE_MULT;
	comp -= adj;

	return comp;
}

struct ptp *ptp_get(void)
{
	struct ptp *ptp = first_ptp_block;

	/* Check PTP block is present in hardware */
	if (!pci_dev_present(ptp_id_table))
		return ERR_PTR(-ENODEV);
	/* Check driver is bound to PTP block */
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);
	else if (!IS_ERR(ptp))
		pci_dev_get(ptp->pdev);

	return ptp;
}

void ptp_put(struct ptp *ptp)
{
	if (!ptp)
		return;

	pci_dev_put(ptp->pdev);
}

static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
{
	u64 regval, curr_rollover_set, nxt_rollover_set;

	/* First setup NSECs and SECs */
	writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
	writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
	writeq(timestamp / NSEC_PER_SEC,
	       ptp->reg_base + PTP_SEC_TIMESTAMP);

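	/* program the rollover window bracketing the new time: the next
	 * one second boundary and the previous one.
	 */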
	nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
	curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
	writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
	writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);

	/* Now, initiate atomic update */
	regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
	regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
	regval |= (ATOMIC_SET << 26);
	writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
}

static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
{
	bool neg_adj = false, atomic_inc_dec = false;
	u64 regval, ptp_clock_hi;

	if (delta < 0) {
		delta = -delta;
		neg_adj = true;
	}

	/* use atomic inc/dec when delta < 1 second */
	if (delta < NSEC_PER_SEC)
		atomic_inc_dec = true;

	if (!atomic_inc_dec) {
		ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
		if (neg_adj) {
			if (ptp_clock_hi > delta)
				ptp_clock_hi -= delta;
			else
				ptp_clock_hi = delta - ptp_clock_hi;
		} else {
			ptp_clock_hi += delta;
		}
		ptp_atomic_update(ptp, ptp_clock_hi);
	} else {
		writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);

		/* initiate atomic inc/dec */
		regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
		regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
		regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
		writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
	}
}

static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
	bool neg_adj = false;
	u32 freq, freq_adj;
	u64 comp, adj;
	s64 ppb;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock
	 * on every coprocessor clock cycle. The typical convention is that
	 * it represents the number of nanoseconds between each cycle. In
	 * this convention the compensation value is in 64-bit fixed-point
	 * representation where the upper 32 bits are the number of
	 * nanoseconds and the lower 32 bits are fractions of a nanosecond.
	 * The scaled_ppm represents the ratio in "parts per million" by
	 * which the compensation value should be corrected.
	 * To calculate the new compensation value we use 64-bit fixed-point
	 * arithmetic on the following formula
	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the basic compensation value calculated
	 * initially in the probe function.
	 */
	/* convert scaled_ppm to ppb */
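	/* scaled_ppm carries a 16-bit binary fraction, so
	 * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 >> 13;
	 * the "1 +" below presumably biases the truncating shift upward.
	 */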
	ppb = 1 + scaled_ppm;
	ppb *= 125;
	ppb >>= 13;

	if (cn10k_ptp_errata(ptp)) {
		/* calculate the new frequency based on ppb */
		freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
		freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
		comp = ptp_calc_adjusted_comp(freq);
	} else {
		comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
		adj = comp * ppb;
		adj = div_u64(adj, 1000000000ull);
		comp = neg_adj ? comp - adj : comp + adj;
	}
	writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);

	return 0;
}

static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
	/* Return the current PTP clock */
	*clk = ptp->read_ptp_tstmp(ptp);

	return 0;
}

void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
{
	struct ptp *ptp = rvu->ptp;
	struct pci_dev *pdev;
	u64 clock_comp;
	u64 clock_cfg;

	if (!ptp)
		return;

	pdev = ptp->pdev;

	if (!sclk) {
		dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
		return;
	}

	/* sclk is in MHz */
	ptp->clock_rate = sclk * 1000000;

	/* Program the seconds rollover value to 1 second */
	if (is_tstmp_atomic_update_supported(rvu)) {
		writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
		writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
		writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
	}

	/* Enable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);

	if (ext_clk_freq) {
		ptp->clock_rate = ext_clk_freq;
		/* Set GPIO as PTP clock source */
		clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
	}

	if (extts) {
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
		/* Set GPIO as timestamping source */
		clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
	}

	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
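	/* initiate an atomic SET, the same opcode sequence used in
	 * ptp_atomic_update(), apparently to load the timestamp and
	 * rollover values programmed above into the running clock.
	 */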
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
	clock_cfg |= (ATOMIC_SET << 26);
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);

	/* Set 50% duty cycle for 1Hz output */
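	/* 0x1dcd6500 is 500000000, i.e. 0.5 sec in the upper 32-bit
	 * nanoseconds field: half a second high, half a second low
	 */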
	writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
	writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
	if (cn10k_ptp_errata(ptp)) {
		/* The ptp_clock_hi rolls over to zero one clock cycle
		 * before it reaches the one second boundary, so program
		 * pps_lo_incr in such a way that the pps threshold value
		 * comparison at the one second boundary will succeed and
		 * the pps edge changes. After each one second boundary,
		 * the hrtimer handler will be invoked to reprogram the
		 * pps threshold value.
		 */
		ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
		writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
		       ptp->reg_base + PTP_PPS_LO_INCR);
	}

	if (cn10k_ptp_errata(ptp))
		clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
	else
		clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;

	/* Initial compensation value to start the nanosecs counter */
	writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}

static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
	u64 timestamp;

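	/* on CN10KA/CNF10KA the timestamp register holds seconds in the
	 * upper 32 bits and nanoseconds in the lower 32 bits; fold them
	 * into a single nanosecond count.
	 */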
	if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
		timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
		*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
	} else {
		*clk = readq(ptp->reg_base + PTP_TIMESTAMP);
	}

	return 0;
}

static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
	if (!cn10k_ptp_errata(ptp))
		writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);

	return 0;
}

static int ptp_extts_on(struct ptp *ptp, int on)
{
	u64 ptp_clock_hi;

	if (cn10k_ptp_errata(ptp)) {
		if (on) {
			ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
			ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
		} else {
			if (hrtimer_active(&ptp->hrtimer))
				hrtimer_cancel(&ptp->hrtimer);
		}
	}

	return 0;
}

static int ptp_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct ptp *ptp;
	int err;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto error;
	}

	ptp->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
	if (err)
		goto error_free;

	ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];

	pci_set_drvdata(pdev, ptp);
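	/* the first successfully probed block is the one ptp_get() hands out */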
	if (!first_ptp_block)
		first_ptp_block = ptp;

	spin_lock_init(&ptp->ptp_lock);
	if (cn10k_ptp_errata(ptp)) {
		ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
		hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ptp->hrtimer.function = ptp_reset_thresh;
	} else {
		ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
	}

	return 0;

error_free:
	kfree(ptp);

error:
	/* For `ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device and the case
	 * when the probe failed.  In the latter case we keep the error in
	 * `dev->driver_data`.
	 */
	pci_set_drvdata(pdev, ERR_PTR(err));
	if (!first_ptp_block)
		first_ptp_block = ERR_PTR(err);

	return err;
}

static void ptp_remove(struct pci_dev *pdev)
{
	struct ptp *ptp = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(ptp))
		return;

	if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
		hrtimer_cancel(&ptp->hrtimer);

	/* Disable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
	kfree(ptp);
}

static const struct pci_device_id ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
	{ 0, }
};

struct pci_driver ptp_driver = {
	.name = DRV_NAME,
	.id_table = ptp_id_table,
	.probe = ptp_probe,
	.remove = ptp_remove,
};

int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
			    struct ptp_rsp *rsp)
{
	int err = 0;

	/* This function is the PTP mailbox handler invoked by AF
	 * consumers/netdev drivers via the mailbox mechanism. It is used by
	 * the netdev driver to get the PTP clock and to set frequency
	 * adjustments. Since the mailbox can be called without knowing
	 * whether the driver is bound to the ptp device, the validation
	 * below is needed as the first step.
	 */
	if (!rvu->ptp)
		return -ENODEV;

	switch (req->op) {
	case PTP_OP_ADJFINE:
		err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
		break;
	case PTP_OP_GET_CLOCK:
		err = ptp_get_clock(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_GET_TSTMP:
		err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_SET_THRESH:
		err = ptp_set_thresh(rvu->ptp, req->thresh);
		break;
	case PTP_OP_EXTTS_ON:
		err = ptp_extts_on(rvu->ptp, req->extts_on);
		break;
	case PTP_OP_ADJTIME:
		ptp_atomic_adjtime(rvu->ptp, req->delta);
		break;
	case PTP_OP_SET_CLOCK:
		ptp_atomic_update(rvu->ptp, (u64)req->clk);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
				 struct ptp_get_cap_rsp *rsp)
{
	if (!rvu->ptp)
		return -ENODEV;

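	/* the hardware atomic update capability is advertised in bit 0 of
	 * the capability word (cleared below when unsupported)
	 */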
	if (is_tstmp_atomic_update_supported(rvu))
		rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
	else
		rsp->cap &= ~BIT_ULL_MASK(0);

	return 0;
}