xref: /linux/drivers/net/ethernet/intel/ice/ice_ptp.c (revision 22118810)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7 
8 #define E810_OUT_PROP_DELAY_NS 1
9 
10 #define UNKNOWN_INCVAL_E82X 0x100000000ULL
11 
12 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
13 	/* name    idx   func         chan */
14 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
15 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
16 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
17 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
18 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
19 };
20 
21 /**
22  * ice_get_sma_config_e810t
23  * @hw: pointer to the hw struct
24  * @ptp_pins: pointer to the ptp_pin_desc structure
25  *
26  * Read the configuration of the SMA control logic and put it into the
27  * ptp_pin_desc structure
28  */
29 static int
30 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
31 {
32 	u8 data, i;
33 	int status;
34 
35 	/* Read initial pin state */
36 	status = ice_read_sma_ctrl_e810t(hw, &data);
37 	if (status)
38 		return status;
39 
40 	/* initialize with defaults */
41 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
42 		strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
43 			sizeof(ptp_pins[i].name));
44 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
45 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
46 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
47 	}
48 
49 	/* Parse SMA1/UFL1 */
50 	switch (data & ICE_SMA1_MASK_E810T) {
51 	case ICE_SMA1_MASK_E810T:
52 	default:
53 		ptp_pins[SMA1].func = PTP_PF_NONE;
54 		ptp_pins[UFL1].func = PTP_PF_NONE;
55 		break;
56 	case ICE_SMA1_DIR_EN_E810T:
57 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
58 		ptp_pins[UFL1].func = PTP_PF_NONE;
59 		break;
60 	case ICE_SMA1_TX_EN_E810T:
61 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
62 		ptp_pins[UFL1].func = PTP_PF_NONE;
63 		break;
64 	case 0:
65 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
66 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
67 		break;
68 	}
69 
70 	/* Parse SMA2/UFL2 */
71 	switch (data & ICE_SMA2_MASK_E810T) {
72 	case ICE_SMA2_MASK_E810T:
73 	default:
74 		ptp_pins[SMA2].func = PTP_PF_NONE;
75 		ptp_pins[UFL2].func = PTP_PF_NONE;
76 		break;
77 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
78 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
79 		ptp_pins[UFL2].func = PTP_PF_NONE;
80 		break;
81 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
82 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
83 		ptp_pins[UFL2].func = PTP_PF_NONE;
84 		break;
85 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
86 		ptp_pins[SMA2].func = PTP_PF_NONE;
87 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
88 		break;
89 	case ICE_SMA2_DIR_EN_E810T:
90 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
91 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
92 		break;
93 	}
94 
95 	return 0;
96 }
97 
98 /**
99  * ice_ptp_set_sma_config_e810t
100  * @hw: pointer to the hw struct
101  * @ptp_pins: pointer to the ptp_pin_desc structure
102  *
103  * Set the configuration of the SMA control logic based on the configuration in
104  * the ptp_pins parameter
105  */
106 static int
107 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
108 			     const struct ptp_pin_desc *ptp_pins)
109 {
110 	int status;
111 	u8 data;
112 
113 	/* SMA1 and UFL1 cannot be set to TX at the same time */
114 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
115 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
116 		return -EINVAL;
117 
118 	/* SMA2 and UFL2 cannot be set to RX at the same time */
119 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
120 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
121 		return -EINVAL;
122 
123 	/* Read initial pin state value */
124 	status = ice_read_sma_ctrl_e810t(hw, &data);
125 	if (status)
126 		return status;
127 
128 	/* Set the right state based on the desired configuration */
129 	data &= ~ICE_SMA1_MASK_E810T;
130 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
131 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
132 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
133 		data |= ICE_SMA1_MASK_E810T;
134 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
135 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
136 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
137 		data |= ICE_SMA1_TX_EN_E810T;
138 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
139 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
140 		/* U.FL 1 TX will always enable SMA 1 RX */
141 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
142 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
143 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
144 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
145 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
146 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
147 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
148 		data |= ICE_SMA1_DIR_EN_E810T;
149 	}
150 
151 	data &= ~ICE_SMA2_MASK_E810T;
152 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
153 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
154 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
155 		data |= ICE_SMA2_MASK_E810T;
156 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
157 			ptp_pins[UFL2].func == PTP_PF_NONE) {
158 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
159 		data |= (ICE_SMA2_TX_EN_E810T |
160 			 ICE_SMA2_UFL2_RX_DIS_E810T);
161 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
162 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
163 		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
164 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
165 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
166 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
167 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
168 		data |= (ICE_SMA2_DIR_EN_E810T |
169 			 ICE_SMA2_UFL2_RX_DIS_E810T);
170 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
171 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
172 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
173 		data |= ICE_SMA2_DIR_EN_E810T;
174 	}
175 
176 	return ice_write_sma_ctrl_e810t(hw, data);
177 }
178 
179 /**
180  * ice_ptp_set_sma_e810t
181  * @info: the driver's PTP info structure
182  * @pin: pin index in kernel structure
183  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
184  *
185  * Set the configuration of a single SMA pin
186  */
187 static int
188 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
189 		      enum ptp_pin_function func)
190 {
191 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
192 	struct ice_pf *pf = ptp_info_to_pf(info);
193 	struct ice_hw *hw = &pf->hw;
194 	int err;
195 
196 	if (pin < SMA1 || func > PTP_PF_PEROUT)
197 		return -EOPNOTSUPP;
198 
199 	err = ice_get_sma_config_e810t(hw, ptp_pins);
200 	if (err)
201 		return err;
202 
203 	/* Disable the same function on the other pin sharing the channel */
204 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
205 		ptp_pins[UFL1].func = PTP_PF_NONE;
206 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
207 		ptp_pins[SMA1].func = PTP_PF_NONE;
208 
209 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
210 		ptp_pins[UFL2].func = PTP_PF_NONE;
211 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
212 		ptp_pins[SMA2].func = PTP_PF_NONE;
213 
214 	/* Set up new pin function in the temp table */
215 	ptp_pins[pin].func = func;
216 
217 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
218 }
219 
220 /**
221  * ice_verify_pin_e810t
222  * @info: the driver's PTP info structure
223  * @pin: Pin index
224  * @func: Assigned function
225  * @chan: Assigned channel
226  *
227  * Verify that the pin supports the requested function and check pin consistency.
228  * Reconfigure the SMA logic attached to the given pin to enable its
229  * desired functionality
230  */
231 static int
232 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
233 		     enum ptp_pin_function func, unsigned int chan)
234 {
235 	/* Don't allow channel reassignment */
236 	if (chan != ice_pin_desc_e810t[pin].chan)
237 		return -EOPNOTSUPP;
238 
239 	/* Check if functions are properly assigned */
240 	switch (func) {
241 	case PTP_PF_NONE:
242 		break;
243 	case PTP_PF_EXTTS:
244 		if (pin == UFL1)
245 			return -EOPNOTSUPP;
246 		break;
247 	case PTP_PF_PEROUT:
248 		if (pin == UFL2 || pin == GNSS)
249 			return -EOPNOTSUPP;
250 		break;
251 	case PTP_PF_PHYSYNC:
252 		return -EOPNOTSUPP;
253 	}
254 
255 	return ice_ptp_set_sma_e810t(info, pin, func);
256 }
257 
258 /**
259  * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
260  * @pf: Board private structure
261  *
262  * Program the device to respond appropriately to the Tx timestamp interrupt
263  * cause.
264  */
265 static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
266 {
267 	struct ice_hw *hw = &pf->hw;
268 	bool enable;
269 	u32 val;
270 
271 	switch (pf->ptp.tx_interrupt_mode) {
272 	case ICE_PTP_TX_INTERRUPT_ALL:
273 		/* React to interrupts across all quads. */
274 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
275 		enable = true;
276 		break;
277 	case ICE_PTP_TX_INTERRUPT_NONE:
278 		/* Do not react to interrupts on any quad. */
279 		wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
280 		enable = false;
281 		break;
282 	case ICE_PTP_TX_INTERRUPT_SELF:
283 	default:
284 		enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
285 		break;
286 	}
287 
288 	/* Configure the Tx timestamp interrupt */
289 	val = rd32(hw, PFINT_OICR_ENA);
290 	if (enable)
291 		val |= PFINT_OICR_TSYN_TX_M;
292 	else
293 		val &= ~PFINT_OICR_TSYN_TX_M;
294 	wr32(hw, PFINT_OICR_ENA, val);
295 }
296 
297 /**
298  * ice_set_rx_tstamp - Enable or disable Rx timestamping
299  * @pf: The PF pointer to search in
300  * @on: bool value for whether timestamps are enabled or disabled
301  */
302 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
303 {
304 	struct ice_vsi *vsi;
305 	u16 i;
306 
307 	vsi = ice_get_main_vsi(pf);
308 	if (!vsi || !vsi->rx_rings)
309 		return;
310 
311 	/* Set the timestamp flag for all the Rx rings */
312 	ice_for_each_rxq(vsi, i) {
313 		if (!vsi->rx_rings[i])
314 			continue;
315 		vsi->rx_rings[i]->ptp_rx = on;
316 	}
317 }
318 
319 /**
320  * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
321  * @pf: Board private structure
322  *
323  * Called during preparation for reset to temporarily disable timestamping on
324  * the device. Called during remove to disable timestamping while cleaning up
325  * driver resources.
326  */
327 static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
328 {
329 	struct ice_hw *hw = &pf->hw;
330 	u32 val;
331 
332 	val = rd32(hw, PFINT_OICR_ENA);
333 	val &= ~PFINT_OICR_TSYN_TX_M;
334 	wr32(hw, PFINT_OICR_ENA, val);
335 
336 	ice_set_rx_tstamp(pf, false);
337 }
338 
339 /**
340  * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
341  * @pf: Board private structure
342  *
343  * Called at the end of rebuild to restore timestamp configuration after
344  * a device reset.
345  */
346 void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
347 {
348 	struct ice_hw *hw = &pf->hw;
349 	bool enable_rx;
350 
351 	ice_ptp_cfg_tx_interrupt(pf);
352 
353 	enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
354 	ice_set_rx_tstamp(pf, enable_rx);
355 
356 	/* Trigger an immediate software interrupt to ensure that timestamps
357 	 * which occurred during reset are handled now.
358 	 */
359 	wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
360 	ice_flush(hw);
361 }
362 
363 /**
364  * ice_ptp_read_src_clk_reg - Read the source clock register
365  * @pf: Board private structure
366  * @sts: Optional parameter for holding a pair of system timestamps from
367  *       the system clock. Will be ignored if NULL is given.
368  */
369 static u64
370 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
371 {
372 	struct ice_hw *hw = &pf->hw;
373 	u32 hi, lo, lo2;
374 	u8 tmr_idx;
375 
376 	tmr_idx = ice_get_ptp_src_clock_index(hw);
377 	guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
378 	/* Read the system timestamp pre PHC read */
379 	ptp_read_system_prets(sts);
380 
381 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
382 
383 	/* Read the system timestamp post PHC read */
384 	ptp_read_system_postts(sts);
385 
386 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
387 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
388 
389 	if (lo2 < lo) {
390 		/* if TIME_L rolled over read TIME_L again and update
391 		 * system timestamps
392 		 */
393 		ptp_read_system_prets(sts);
394 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
395 		ptp_read_system_postts(sts);
396 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
397 	}
398 
399 	return ((u64)hi << 32) | lo;
400 }
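The lo/hi/lo2 sequence above exists because GLTSYN_TIME_L can wrap between the two register reads; observing lo2 < lo means the upper word has already ticked over, so the pair must be re-read. A minimal userspace sketch of that check with simulated register values (illustrative only; not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Simulated reads around a TIME_L wrap: lo sampled just before the
	 * wrap, hi and lo2 sampled just after it.
	 */
	uint32_t lo = 0xFFFFFFF0, hi = 0x00000006, lo2 = 0x00000002;

	if (lo2 < lo)
		lo = lo2;	/* the driver re-reads TIME_L (and TIME_H) here */

	printf("time = 0x%016llx\n",
	       (unsigned long long)(((uint64_t)hi << 32) | lo));
	return 0;
}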
401 
402 /**
403  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
404  * @cached_phc_time: recently cached copy of PHC time
405  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
406  *
407  * Hardware captures timestamps which contain only 32 bits of nominal
408  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
409  * Note that the captured timestamp values may be 40 bits, but the lower
410  * 8 bits are sub-nanoseconds and generally discarded.
411  *
412  * Extend the 32bit nanosecond timestamp using the following algorithm and
413  * assumptions:
414  *
415  * 1) have a recently cached copy of the PHC time
416  * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
417  *    seconds) before or after the PHC time was captured.
418  * 3) calculate the delta between the cached time and the timestamp
419  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
420  *    captured after the PHC time. In this case, the full timestamp is just
421  *    the cached PHC time plus the delta.
422  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
423  *    timestamp was captured *before* the PHC time, i.e. because the PHC
424  *    cache was updated after the timestamp was captured by hardware. In this
425  *    case, the full timestamp is the cached time minus the inverse delta.
426  *
427  * This algorithm works even if the PHC time was updated after a Tx timestamp
428  * was requested, but before the Tx timestamp event was reported from
429  * hardware.
430  *
431  * This calculation primarily relies on keeping the cached PHC time up to
432  * date. If the timestamp was captured more than 2^31 nanoseconds after the
433  * PHC time, it is possible that the lower 32bits of PHC time have
434  * overflowed more than once, and we might generate an incorrect timestamp.
435  *
436  * This is prevented by (a) periodically updating the cached PHC time once
437  * a second, and (b) discarding any Tx timestamp packet if it has waited for
438  * a timestamp for more than one second.
439  */
440 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
441 {
442 	u32 delta, phc_time_lo;
443 	u64 ns;
444 
445 	/* Extract the lower 32 bits of the PHC time */
446 	phc_time_lo = (u32)cached_phc_time;
447 
448 	/* Calculate the delta between the lower 32bits of the cached PHC
449 	 * time and the in_tstamp value
450 	 */
451 	delta = (in_tstamp - phc_time_lo);
452 
453 	/* Do not assume that the in_tstamp is always more recent than the
454 	 * cached PHC time. If the delta is large, it indicates that the
455 	 * in_tstamp was taken in the past, and should be converted
456 	 * forward.
457 	 */
458 	if (delta > (U32_MAX / 2)) {
459 		/* reverse the delta calculation here */
460 		delta = (phc_time_lo - in_tstamp);
461 		ns = cached_phc_time - delta;
462 	} else {
463 		ns = cached_phc_time + delta;
464 	}
465 
466 	return ns;
467 }
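A minimal userspace sketch of the same extension arithmetic (illustrative only; stdint types stand in for the kernel's u32/u64, and the example values are made up):

#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_time_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_time_lo;

	/* A large delta means the timestamp predates the cached PHC time */
	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (uint32_t)(phc_time_lo - in_tstamp);

	return cached_phc_time + delta;
}

int main(void)
{
	/* Captured 16 ns after the cached PHC time: prints 0x100000010 */
	printf("0x%llx\n",
	       (unsigned long long)extend_32b_ts(0x100000000ULL, 0x10));
	/* Captured 32 ns before the cached PHC time: prints 0xfffffff0 */
	printf("0x%llx\n",
	       (unsigned long long)extend_32b_ts(0x100000010ULL, 0xFFFFFFF0));
	return 0;
}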
468 
469 /**
470  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
471  * @pf: Board private structure
472  * @in_tstamp: Ingress/egress 40b timestamp value
473  *
474  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
475  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
476  *
477  *  *--------------------------------------------------------------*
478  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
479  *  *--------------------------------------------------------------*
480  *
481  * The low bit is an indicator of whether the timestamp is valid. The next
482  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
483  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
484  *
485  * It is assumed that the caller verifies the timestamp is valid prior to
486  * calling this function.
487  *
488  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
489  * time stored in the device private PTP structure as the basis for timestamp
490  * extension.
491  *
492  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
493  * algorithm.
494  */
495 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
496 {
497 	const u64 mask = GENMASK_ULL(31, 0);
498 	unsigned long discard_time;
499 
500 	/* Discard the hardware timestamp if the cached PHC time is too old */
501 	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
502 	if (time_is_before_jiffies(discard_time)) {
503 		pf->ptp.tx_hwtstamp_discarded++;
504 		return 0;
505 	}
506 
507 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
508 				     (in_tstamp >> 8) & mask);
509 }
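A short sketch of unpacking the 40-bit layout described above: the valid bit and the 7 sub-nanosecond bits are discarded and only the 32 nanosecond bits are kept (illustrative only; the raw value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 40-bit raw timestamp: bits [39:8] ns, [7:1] sub-ns, [0] valid */
	uint64_t raw = (0xABCD1234ULL << 8) | (0x55 << 1) | 0x1;
	uint32_t ns_lo32 = (uint32_t)(raw >> 8);	/* 0xabcd1234 */

	if (raw & 0x1)	/* valid bit, mirroring ICE_PTP_TS_VALID */
		printf("lower 32 bits of PHC time: 0x%08x\n", ns_lo32);
	return 0;
}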
510 
511 /**
512  * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
513  * @tx: the PTP Tx timestamp tracker to check
514  *
515  * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
516  * to accept new timestamp requests.
517  *
518  * Assumes the tx->lock spinlock is already held.
519  */
520 static bool
521 ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
522 {
523 	lockdep_assert_held(&tx->lock);
524 
525 	return tx->init && !tx->calibrating;
526 }
527 
528 /**
529  * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
530  * @tx: the PTP Tx timestamp tracker
531  * @idx: index of the timestamp to request
532  */
533 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
534 {
535 	struct ice_ptp_port *ptp_port;
536 	struct sk_buff *skb;
537 	struct ice_pf *pf;
538 
539 	if (!tx->init)
540 		return;
541 
542 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
543 	pf = ptp_port_to_pf(ptp_port);
544 
545 	/* Drop packets which have waited for more than 2 seconds */
546 	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
547 		/* Count the number of Tx timestamps that timed out */
548 		pf->ptp.tx_hwtstamp_timeouts++;
549 
550 		skb = tx->tstamps[idx].skb;
551 		tx->tstamps[idx].skb = NULL;
552 		clear_bit(idx, tx->in_use);
553 
554 		dev_kfree_skb_any(skb);
555 		return;
556 	}
557 
558 	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
559 
560 	/* Write TS index to read to the PF register so the FW can read it */
561 	wr32(&pf->hw, PF_SB_ATQBAL,
562 	     TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
563 	     TS_LL_READ_TS);
564 	tx->last_ll_ts_idx_read = idx;
565 }
566 
567 /**
568  * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
569  * @tx: the PTP Tx timestamp tracker
570  */
571 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
572 {
573 	struct skb_shared_hwtstamps shhwtstamps = {};
574 	u8 idx = tx->last_ll_ts_idx_read;
575 	struct ice_ptp_port *ptp_port;
576 	u64 raw_tstamp, tstamp;
577 	bool drop_ts = false;
578 	struct sk_buff *skb;
579 	struct ice_pf *pf;
580 	u32 val;
581 
582 	if (!tx->init || tx->last_ll_ts_idx_read < 0)
583 		return;
584 
585 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
586 	pf = ptp_port_to_pf(ptp_port);
587 
588 	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
589 
590 	val = rd32(&pf->hw, PF_SB_ATQBAL);
591 
592 	/* When the bit is cleared, the TS is ready in the register */
593 	if (val & TS_LL_READ_TS) {
594 		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
595 		return;
596 	}
597 
598 	/* High 8 bit value of the TS is on the bits 16:23 */
599 	/* The high 8 bits of the TS are in bits 16:23 */
600 	raw_tstamp <<= 32;
601 
602 	/* Read the low 32 bit value */
603 	raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
604 
605 	/* Devices using this interface always verify the timestamp differs
606 	 * relative to the last cached timestamp value.
607 	 */
608 	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
609 		return;
610 
611 	tx->tstamps[idx].cached_tstamp = raw_tstamp;
612 	clear_bit(idx, tx->in_use);
613 	skb = tx->tstamps[idx].skb;
614 	tx->tstamps[idx].skb = NULL;
615 	if (test_and_clear_bit(idx, tx->stale))
616 		drop_ts = true;
617 
618 	if (!skb)
619 		return;
620 
621 	if (drop_ts) {
622 		dev_kfree_skb_any(skb);
623 		return;
624 	}
625 
626 	/* Extend the timestamp using cached PHC time */
627 	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
628 	if (tstamp) {
629 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
630 		ice_trace(tx_tstamp_complete, skb, idx);
631 	}
632 
633 	skb_tstamp_tx(skb, &shhwtstamps);
634 	dev_kfree_skb_any(skb);
635 }
636 
637 /**
638  * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
639  * @tx: the PTP Tx timestamp tracker
640  *
641  * Process timestamps captured by the PHY associated with this port. To do
642  * this, loop over each index with a waiting skb.
643  *
644  * If a given index has a valid timestamp, perform the following steps:
645  *
646  * 1) check that the timestamp request is not stale
647  * 2) check that a timestamp is ready and available in the PHY memory bank
648  * 3) read and copy the timestamp out of the PHY register
649  * 4) unlock the index by clearing the associated in_use bit
650  * 5) check if the timestamp is stale, and discard if so
651  * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
652  * 7) send this 64 bit timestamp to the stack
653  *
654  * Note that we do not hold the tracking lock while reading the Tx timestamp.
655  * This is because reading the timestamp requires taking a mutex that might
656  * sleep.
657  *
658  * The only place where we set in_use is when a new timestamp is initiated
659  * with a slot index. This is only called in the hard xmit routine where an
660  * SKB has a request flag set. The only places where we clear this bit is this
661  * function, or during teardown when the Tx timestamp tracker is being
662  * removed. A timestamp index will never be re-used until the in_use bit for
663  * that index is cleared.
664  *
665  * If a Tx thread starts a new timestamp, we might not begin processing it
666  * right away but we will notice it at the end when we re-queue the task.
667  *
668  * If a Tx thread starts a new timestamp just after this function exits, the
669  * interrupt for that timestamp should re-trigger this function once
670  * a timestamp is ready.
671  *
672  * In cases where the PTP hardware clock was directly adjusted, some
673  * timestamps may not be able to safely use the timestamp extension math. In
674  * this case, software will set the stale bit for any outstanding Tx
675  * timestamps when the clock is adjusted. Then this function will discard
676  * those captured timestamps instead of sending them to the stack.
677  *
678  * If a Tx packet has been waiting for more than 2 seconds, it is not possible
679  * to correctly extend the timestamp using the cached PHC time. It is
680  * extremely unlikely that a packet will ever take this long to timestamp. If
681  * we detect a Tx timestamp request that has waited for this long we assume
682  * the packet will never be sent by hardware and discard it without reading
683  * the timestamp register.
684  */
685 static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
686 {
687 	struct ice_ptp_port *ptp_port;
688 	unsigned long flags;
689 	struct ice_pf *pf;
690 	struct ice_hw *hw;
691 	u64 tstamp_ready;
692 	bool link_up;
693 	int err;
694 	u8 idx;
695 
696 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
697 	pf = ptp_port_to_pf(ptp_port);
698 	hw = &pf->hw;
699 
700 	/* Read the Tx ready status first */
701 	if (tx->has_ready_bitmap) {
702 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
703 		if (err)
704 			return;
705 	}
706 
707 	/* Drop packets if the link went down */
708 	link_up = ptp_port->link_up;
709 
710 	for_each_set_bit(idx, tx->in_use, tx->len) {
711 		struct skb_shared_hwtstamps shhwtstamps = {};
712 		u8 phy_idx = idx + tx->offset;
713 		u64 raw_tstamp = 0, tstamp;
714 		bool drop_ts = !link_up;
715 		struct sk_buff *skb;
716 
717 		/* Drop packets which have waited for more than 2 seconds */
718 		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
719 			drop_ts = true;
720 
721 			/* Count the number of Tx timestamps that timed out */
722 			pf->ptp.tx_hwtstamp_timeouts++;
723 		}
724 
725 		/* Only read a timestamp from the PHY if it's marked as ready
726 		 * by the tstamp_ready register. This avoids unnecessary
727 		 * reading of timestamps which are not yet valid. This is
728 		 * important as we must read all timestamps which are valid
729 		 * and only timestamps which are valid during each interrupt.
730 		 * If we do not, the hardware logic for generating a new
731 		 * interrupt can get stuck on some devices.
732 		 */
733 		if (tx->has_ready_bitmap &&
734 		    !(tstamp_ready & BIT_ULL(phy_idx))) {
735 			if (drop_ts)
736 				goto skip_ts_read;
737 
738 			continue;
739 		}
740 
741 		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
742 
743 		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
744 		if (err && !drop_ts)
745 			continue;
746 
747 		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
748 
749 		/* For PHYs which don't implement a proper timestamp ready
750 		 * bitmap, verify that the timestamp value is different
751 		 * from the last cached timestamp. If it is not, skip this for
752 		 * now assuming it hasn't yet been captured by hardware.
753 		 */
754 		if (!drop_ts && !tx->has_ready_bitmap &&
755 		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
756 			continue;
757 
758 		/* Discard any timestamp value without the valid bit set */
759 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
760 			drop_ts = true;
761 
762 skip_ts_read:
763 		spin_lock_irqsave(&tx->lock, flags);
764 		if (!tx->has_ready_bitmap && raw_tstamp)
765 			tx->tstamps[idx].cached_tstamp = raw_tstamp;
766 		clear_bit(idx, tx->in_use);
767 		skb = tx->tstamps[idx].skb;
768 		tx->tstamps[idx].skb = NULL;
769 		if (test_and_clear_bit(idx, tx->stale))
770 			drop_ts = true;
771 		spin_unlock_irqrestore(&tx->lock, flags);
772 
773 		/* It is unlikely but possible that the SKB will have been
774 		 * flushed at this point due to link change or teardown.
775 		 */
776 		if (!skb)
777 			continue;
778 
779 		if (drop_ts) {
780 			dev_kfree_skb_any(skb);
781 			continue;
782 		}
783 
784 		/* Extend the timestamp using cached PHC time */
785 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
786 		if (tstamp) {
787 			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
788 			ice_trace(tx_tstamp_complete, skb, idx);
789 		}
790 
791 		skb_tstamp_tx(skb, &shhwtstamps);
792 		dev_kfree_skb_any(skb);
793 	}
794 }
795 
796 /**
797  * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
798  * @pf: Board private structure
799  */
800 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
801 {
802 	struct ice_ptp_port *port;
803 	unsigned int i;
804 
805 	mutex_lock(&pf->ptp.ports_owner.lock);
806 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
807 		struct ice_ptp_tx *tx = &port->tx;
808 
809 		if (!tx || !tx->init)
810 			continue;
811 
812 		ice_ptp_process_tx_tstamp(tx);
813 	}
814 	mutex_unlock(&pf->ptp.ports_owner.lock);
815 
816 	for (i = 0; i < ICE_MAX_QUAD; i++) {
817 		u64 tstamp_ready;
818 		int err;
819 
820 		/* Read the Tx ready status first */
821 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
822 		if (err)
823 			break;
824 		else if (tstamp_ready)
825 			return ICE_TX_TSTAMP_WORK_PENDING;
826 	}
827 
828 	return ICE_TX_TSTAMP_WORK_DONE;
829 }
830 
831 /**
832  * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
833  * @tx: the Tx timestamp tracker to process
834  *
835  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
836  * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
837  */
838 static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
839 {
840 	bool more_timestamps;
841 	unsigned long flags;
842 
843 	if (!tx->init)
844 		return ICE_TX_TSTAMP_WORK_DONE;
845 
846 	/* Process the Tx timestamp tracker */
847 	ice_ptp_process_tx_tstamp(tx);
848 
849 	/* Check if there are outstanding Tx timestamps */
850 	spin_lock_irqsave(&tx->lock, flags);
851 	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
852 	spin_unlock_irqrestore(&tx->lock, flags);
853 
854 	if (more_timestamps)
855 		return ICE_TX_TSTAMP_WORK_PENDING;
856 
857 	return ICE_TX_TSTAMP_WORK_DONE;
858 }
859 
860 /**
861  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
862  * @tx: Tx tracking structure to initialize
863  *
864  * Assumes that the length has already been initialized. Do not call directly,
865  * use the ice_ptp_init_tx_* functions instead.
866  */
867 static int
868 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
869 {
870 	unsigned long *in_use, *stale;
871 	struct ice_tx_tstamp *tstamps;
872 
873 	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
874 	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
875 	stale = bitmap_zalloc(tx->len, GFP_KERNEL);
876 
877 	if (!tstamps || !in_use || !stale) {
878 		kfree(tstamps);
879 		bitmap_free(in_use);
880 		bitmap_free(stale);
881 
882 		return -ENOMEM;
883 	}
884 
885 	tx->tstamps = tstamps;
886 	tx->in_use = in_use;
887 	tx->stale = stale;
888 	tx->init = 1;
889 	tx->last_ll_ts_idx_read = -1;
890 
891 	spin_lock_init(&tx->lock);
892 
893 	return 0;
894 }
895 
896 /**
897  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
898  * @pf: Board private structure
899  * @tx: the tracker to flush
900  *
901  * Called during teardown when a Tx tracker is being removed.
902  */
903 static void
904 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
905 {
906 	struct ice_hw *hw = &pf->hw;
907 	unsigned long flags;
908 	u64 tstamp_ready;
909 	int err;
910 	u8 idx;
911 
912 	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
913 	if (err) {
914 		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
915 			tx->block, err);
916 
917 		/* If we fail to read the Tx timestamp ready bitmap just
918 		 * skip clearing the PHY timestamps.
919 		 */
920 		tstamp_ready = 0;
921 	}
922 
923 	for_each_set_bit(idx, tx->in_use, tx->len) {
924 		u8 phy_idx = idx + tx->offset;
925 		struct sk_buff *skb;
926 
927 		/* In case this timestamp is ready, we need to clear it. */
928 		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
929 			ice_clear_phy_tstamp(hw, tx->block, phy_idx);
930 
931 		spin_lock_irqsave(&tx->lock, flags);
932 		skb = tx->tstamps[idx].skb;
933 		tx->tstamps[idx].skb = NULL;
934 		clear_bit(idx, tx->in_use);
935 		clear_bit(idx, tx->stale);
936 		spin_unlock_irqrestore(&tx->lock, flags);
937 
938 		/* Count the number of Tx timestamps flushed */
939 		pf->ptp.tx_hwtstamp_flushed++;
940 
941 		/* Free the SKB after we've cleared the bit */
942 		dev_kfree_skb_any(skb);
943 	}
944 }
945 
946 /**
947  * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
948  * @tx: the tracker to mark
949  *
950  * Mark currently outstanding Tx timestamps as stale. This prevents sending
951  * their timestamp value to the stack. This is required to prevent extending
952  * the 40bit hardware timestamp incorrectly.
953  *
954  * This should be called when the PTP clock is modified such as after a set
955  * time request.
956  */
957 static void
958 ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
959 {
960 	unsigned long flags;
961 
962 	spin_lock_irqsave(&tx->lock, flags);
963 	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
964 	spin_unlock_irqrestore(&tx->lock, flags);
965 }
966 
967 /**
968  * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
969  * @pf: Board private structure
970  *
971  * Called by the clock owner to flush all the Tx timestamp trackers associated
972  * with the clock.
973  */
974 static void
975 ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
976 {
977 	struct ice_ptp_port *port;
978 
979 	list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
980 		ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
981 }
982 
983 /**
984  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
985  * @pf: Board private structure
986  * @tx: Tx tracking structure to release
987  *
988  * Free memory associated with the Tx timestamp tracker.
989  */
990 static void
991 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
992 {
993 	unsigned long flags;
994 
995 	spin_lock_irqsave(&tx->lock, flags);
996 	tx->init = 0;
997 	spin_unlock_irqrestore(&tx->lock, flags);
998 
999 	/* wait for potentially outstanding interrupt to complete */
1000 	synchronize_irq(pf->oicr_irq.virq);
1001 
1002 	ice_ptp_flush_tx_tracker(pf, tx);
1003 
1004 	kfree(tx->tstamps);
1005 	tx->tstamps = NULL;
1006 
1007 	bitmap_free(tx->in_use);
1008 	tx->in_use = NULL;
1009 
1010 	bitmap_free(tx->stale);
1011 	tx->stale = NULL;
1012 
1013 	tx->len = 0;
1014 }
1015 
1016 /**
1017  * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
1018  * @pf: Board private structure
1019  * @tx: the Tx tracking structure to initialize
1020  * @port: the port this structure tracks
1021  *
1022  * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
1023  * the timestamp block is shared for all ports in the same quad. To avoid
1024  * ports using the same timestamp index, logically break the block of
1025  * registers into chunks based on the port number.
1026  */
1027 static int
1028 ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
1029 {
1030 	tx->block = port / ICE_PORTS_PER_QUAD;
1031 	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
1032 	tx->len = INDEX_PER_PORT_E82X;
1033 	tx->has_ready_bitmap = 1;
1034 
1035 	return ice_ptp_alloc_tx_tracker(tx);
1036 }
1037 
1038 /**
1039  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1040  * @pf: Board private structure
1041  * @tx: the Tx tracking structure to initialize
1042  *
1043  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1044  * port has its own block of timestamps, independent of the other ports.
1045  */
1046 static int
1047 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1048 {
1049 	tx->block = pf->hw.port_info->lport;
1050 	tx->offset = 0;
1051 	tx->len = INDEX_PER_PORT_E810;
1052 	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
1053 	 * verify new timestamps against cached copy of the last read
1054 	 * timestamp.
1055 	 */
1056 	tx->has_ready_bitmap = 0;
1057 
1058 	return ice_ptp_alloc_tx_tracker(tx);
1059 }
1060 
1061 /**
1062  * ice_ptp_update_cached_phctime - Update the cached PHC time values
1063  * @pf: Board specific private structure
1064  *
1065  * This function updates the system time values which are cached in the PF
1066  * structure and the Rx rings.
1067  *
1068  * This function must be called periodically to ensure that the cached value
1069  * is never more than 2 seconds old.
1070  *
1071  * Note that the cached copy in the PF PTP structure is always updated, even
1072  * if we can't update the copy in the Rx rings.
1073  *
1074  * Return:
1075  * * 0 - OK, successfully updated
1076  * * -EAGAIN - PF was busy, need to reschedule the update
1077  */
1078 static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
1079 {
1080 	struct device *dev = ice_pf_to_dev(pf);
1081 	unsigned long update_before;
1082 	u64 systime;
1083 	int i;
1084 
1085 	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
1086 	if (pf->ptp.cached_phc_time &&
1087 	    time_is_before_jiffies(update_before)) {
1088 		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
1089 
1090 		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
1091 			 jiffies_to_msecs(time_taken));
1092 		pf->ptp.late_cached_phc_updates++;
1093 	}
1094 
1095 	/* Read the current PHC time */
1096 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
1097 
1098 	/* Update the cached PHC time stored in the PF structure */
1099 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
1100 	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
1101 
1102 	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
1103 		return -EAGAIN;
1104 
1105 	ice_for_each_vsi(pf, i) {
1106 		struct ice_vsi *vsi = pf->vsi[i];
1107 		int j;
1108 
1109 		if (!vsi)
1110 			continue;
1111 
1112 		if (vsi->type != ICE_VSI_PF)
1113 			continue;
1114 
1115 		ice_for_each_rxq(vsi, j) {
1116 			if (!vsi->rx_rings[j])
1117 				continue;
1118 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
1119 		}
1120 	}
1121 	clear_bit(ICE_CFG_BUSY, pf->state);
1122 
1123 	return 0;
1124 }
1125 
1126 /**
1127  * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
1128  * @pf: Board specific private structure
1129  *
1130  * This function must be called when the cached PHC time is no longer valid,
1131  * such as after a time adjustment. It marks any currently outstanding Tx
1132  * timestamps as stale and updates the cached PHC time for both the PF and Rx
1133  * rings.
1134  *
1135  * If updating the PHC time cannot be done immediately, a warning message is
1136  * logged and the work item is scheduled immediately to minimize the window
1137  * with a wrong cached timestamp.
1138  */
1139 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
1140 {
1141 	struct device *dev = ice_pf_to_dev(pf);
1142 	int err;
1143 
1144 	/* Update the cached PHC time immediately if possible, otherwise
1145 	 * schedule the work item to execute soon.
1146 	 */
1147 	err = ice_ptp_update_cached_phctime(pf);
1148 	if (err) {
1149 		/* If another thread is updating the Rx rings, we won't
1150 		 * properly reset them here. This could lead to reporting of
1151 		 * invalid timestamps, but there isn't much we can do.
1152 		 */
1153 		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
1154 			 __func__);
1155 
1156 		/* Queue the work item to update the Rx rings when possible */
1157 		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
1158 					   msecs_to_jiffies(10));
1159 	}
1160 
1161 	/* Mark any outstanding timestamps as stale, since they might have
1162 	 * been captured in hardware before the time update. This could lead
1163 	 * to us extending them with the wrong cached value resulting in
1164 	 * incorrect timestamp values.
1165 	 */
1166 	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
1167 }
1168 
1169 /**
1170  * ice_ptp_write_init - Set PHC time to provided value
1171  * @pf: Board private structure
1172  * @ts: timespec structure that holds the new time value
1173  *
1174  * Set the PHC time to the specified time provided in the timespec.
1175  */
1176 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1177 {
1178 	u64 ns = timespec64_to_ns(ts);
1179 	struct ice_hw *hw = &pf->hw;
1180 
1181 	return ice_ptp_init_time(hw, ns);
1182 }
1183 
1184 /**
1185  * ice_ptp_write_adj - Adjust PHC clock time atomically
1186  * @pf: Board private structure
1187  * @adj: Adjustment in nanoseconds
1188  *
1189  * Perform an atomic adjustment of the PHC time by the specified number of
1190  * nanoseconds.
1191  */
1192 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
1193 {
1194 	struct ice_hw *hw = &pf->hw;
1195 
1196 	return ice_ptp_adj_clock(hw, adj);
1197 }
1198 
1199 /**
1200  * ice_base_incval - Get base timer increment value
1201  * @pf: Board private structure
1202  *
1203  * Look up the base timer increment value for this device. The base increment
1204  * value is used to define the nominal clock tick rate. This increment value
1205  * is programmed during device initialization. It is also used as the basis
1206  * for calculating adjustments using scaled_ppm.
1207  */
1208 static u64 ice_base_incval(struct ice_pf *pf)
1209 {
1210 	struct ice_hw *hw = &pf->hw;
1211 	u64 incval;
1212 
1213 	if (ice_is_e810(hw))
1214 		incval = ICE_PTP_NOMINAL_INCVAL_E810;
1215 	else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
1216 		incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
1217 	else
1218 		incval = UNKNOWN_INCVAL_E82X;
1219 
1220 	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1221 		incval);
1222 
1223 	return incval;
1224 }
1225 
1226 /**
1227  * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1228  * @port: PTP port for which Tx FIFO is checked
1229  */
1230 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
1231 {
1232 	int quad = port->port_num / ICE_PORTS_PER_QUAD;
1233 	int offs = port->port_num % ICE_PORTS_PER_QUAD;
1234 	struct ice_pf *pf;
1235 	struct ice_hw *hw;
1236 	u32 val, phy_sts;
1237 	int err;
1238 
1239 	pf = ptp_port_to_pf(port);
1240 	hw = &pf->hw;
1241 
1242 	if (port->tx_fifo_busy_cnt == FIFO_OK)
1243 		return 0;
1244 
1245 	/* need to read FIFO state */
1246 	if (offs == 0 || offs == 1)
1247 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
1248 					     &val);
1249 	else
1250 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
1251 					     &val);
1252 
1253 	if (err) {
1254 		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
1255 			port->port_num, err);
1256 		return err;
1257 	}
1258 
1259 	if (offs & 0x1)
1260 		phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
1261 	else
1262 		phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
1263 
1264 	if (phy_sts & FIFO_EMPTY) {
1265 		port->tx_fifo_busy_cnt = FIFO_OK;
1266 		return 0;
1267 	}
1268 
1269 	port->tx_fifo_busy_cnt++;
1270 
1271 	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
1272 		port->tx_fifo_busy_cnt, port->port_num);
1273 
1274 	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
1275 		dev_dbg(ice_pf_to_dev(pf),
1276 			"Port %d Tx FIFO still not empty; resetting quad %d\n",
1277 			port->port_num, quad);
1278 		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
1279 		port->tx_fifo_busy_cnt = FIFO_OK;
1280 		return 0;
1281 	}
1282 
1283 	return -EAGAIN;
1284 }
1285 
1286 /**
1287  * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
1288  * @work: Pointer to the kthread_work structure for this task
1289  *
1290  * Check whether hardware has completed measuring the Tx and Rx offset values
1291  * used to configure and enable vernier timestamp calibration.
1292  *
1293  * Once the offset in either direction is measured, configure the associated
1294  * registers with the calibrated offset values and enable timestamping. The Tx
1295  * and Rx directions are configured independently as soon as their associated
1296  * offsets are known.
1297  *
1298  * This function reschedules itself until both Tx and Rx calibration have
1299  * completed.
1300  */
1301 static void ice_ptp_wait_for_offsets(struct kthread_work *work)
1302 {
1303 	struct ice_ptp_port *port;
1304 	struct ice_pf *pf;
1305 	struct ice_hw *hw;
1306 	int tx_err;
1307 	int rx_err;
1308 
1309 	port = container_of(work, struct ice_ptp_port, ov_work.work);
1310 	pf = ptp_port_to_pf(port);
1311 	hw = &pf->hw;
1312 
1313 	if (ice_is_reset_in_progress(pf->state)) {
1314 		/* wait for device driver to complete reset */
1315 		kthread_queue_delayed_work(pf->ptp.kworker,
1316 					   &port->ov_work,
1317 					   msecs_to_jiffies(100));
1318 		return;
1319 	}
1320 
1321 	tx_err = ice_ptp_check_tx_fifo(port);
1322 	if (!tx_err)
1323 		tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
1324 	rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
1325 	if (tx_err || rx_err) {
1326 		/* Tx and/or Rx offset not yet configured, try again later */
1327 		kthread_queue_delayed_work(pf->ptp.kworker,
1328 					   &port->ov_work,
1329 					   msecs_to_jiffies(100));
1330 		return;
1331 	}
1332 }
1333 
1334 /**
1335  * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1336  * @ptp_port: PTP port to stop
1337  */
1338 static int
1339 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
1340 {
1341 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1342 	u8 port = ptp_port->port_num;
1343 	struct ice_hw *hw = &pf->hw;
1344 	int err;
1345 
1346 	if (ice_is_e810(hw))
1347 		return 0;
1348 
1349 	mutex_lock(&ptp_port->ps_lock);
1350 
1351 	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1352 
1353 	err = ice_stop_phy_timer_e82x(hw, port, true);
1354 	if (err)
1355 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
1356 			port, err);
1357 
1358 	mutex_unlock(&ptp_port->ps_lock);
1359 
1360 	return err;
1361 }
1362 
1363 /**
1364  * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1365  * @ptp_port: PTP port for which the PHY start is set
1366  *
1367  * Start the PHY timestamping block, and initiate Vernier timestamping
1368  * calibration. If timestamping cannot be calibrated (such as if link is down)
1369  * then disable the timestamping block instead.
1370  */
1371 static int
1372 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
1373 {
1374 	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
1375 	u8 port = ptp_port->port_num;
1376 	struct ice_hw *hw = &pf->hw;
1377 	unsigned long flags;
1378 	int err;
1379 
1380 	if (ice_is_e810(hw))
1381 		return 0;
1382 
1383 	if (!ptp_port->link_up)
1384 		return ice_ptp_port_phy_stop(ptp_port);
1385 
1386 	mutex_lock(&ptp_port->ps_lock);
1387 
1388 	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
1389 
1390 	/* temporarily disable Tx timestamps while calibrating PHY offset */
1391 	spin_lock_irqsave(&ptp_port->tx.lock, flags);
1392 	ptp_port->tx.calibrating = true;
1393 	spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1394 	ptp_port->tx_fifo_busy_cnt = 0;
1395 
1396 	/* Start the PHY timer in Vernier mode */
1397 	err = ice_start_phy_timer_e82x(hw, port);
1398 	if (err)
1399 		goto out_unlock;
1400 
1401 	/* Enable Tx timestamps right away */
1402 	spin_lock_irqsave(&ptp_port->tx.lock, flags);
1403 	ptp_port->tx.calibrating = false;
1404 	spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
1405 
1406 	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
1407 
1408 out_unlock:
1409 	if (err)
1410 		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
1411 			port, err);
1412 
1413 	mutex_unlock(&ptp_port->ps_lock);
1414 
1415 	return err;
1416 }
1417 
1418 /**
1419  * ice_ptp_link_change - Reconfigure PTP after link status change
1420  * @pf: Board private structure
1421  * @port: Port for which the PHY start is set
1422  * @linkup: Link is up or down
1423  */
1424 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
1425 {
1426 	struct ice_ptp_port *ptp_port;
1427 	struct ice_hw *hw = &pf->hw;
1428 
1429 	if (pf->ptp.state != ICE_PTP_READY)
1430 		return;
1431 
1432 	if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
1433 		return;
1434 
1435 	ptp_port = &pf->ptp.port;
1436 	if (WARN_ON_ONCE(ptp_port->port_num != port))
1437 		return;
1438 
1439 	/* Update cached link status for this port immediately */
1440 	ptp_port->link_up = linkup;
1441 
1442 	switch (hw->phy_model) {
1443 	case ICE_PHY_E810:
1444 		/* Do not reconfigure E810 PHY */
1445 		return;
1446 	case ICE_PHY_E82X:
1447 		ice_ptp_port_phy_restart(ptp_port);
1448 		return;
1449 	default:
1450 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
1451 	}
1452 }
1453 
1454 /**
1455  * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
1456  * @pf: PF private structure
1457  * @ena: bool value to enable or disable interrupt
1458  * @threshold: Minimum number of packets at which intr is triggered
1459  *
1460  * Utility function to enable or disable Tx timestamp interrupt and threshold
1461  */
1462 static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
1463 {
1464 	struct ice_hw *hw = &pf->hw;
1465 	int err = 0;
1466 	int quad;
1467 	u32 val;
1468 
1469 	ice_ptp_reset_ts_memory(hw);
1470 
1471 	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
1472 		err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
1473 					     &val);
1474 		if (err)
1475 			break;
1476 
1477 		if (ena) {
1478 			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
1479 			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
1480 			val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
1481 					  threshold);
1482 		} else {
1483 			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
1484 		}
1485 
1486 		err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
1487 					      val);
1488 		if (err)
1489 			break;
1490 	}
1491 
1492 	if (err)
1493 		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
1494 			err);
1495 	return err;
1496 }
1497 
1498 /**
1499  * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1500  * @pf: Board private structure
1501  */
1502 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
1503 {
1504 	ice_ptp_port_phy_restart(&pf->ptp.port);
1505 }
1506 
1507 /**
1508  * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
1509  * @pf: Board private structure
1510  */
1511 static void ice_ptp_restart_all_phy(struct ice_pf *pf)
1512 {
1513 	struct list_head *entry;
1514 
1515 	list_for_each(entry, &pf->ptp.ports_owner.ports) {
1516 		struct ice_ptp_port *port = list_entry(entry,
1517 						       struct ice_ptp_port,
1518 						       list_member);
1519 
1520 		if (port->link_up)
1521 			ice_ptp_port_phy_restart(port);
1522 	}
1523 }
1524 
1525 /**
1526  * ice_ptp_adjfine - Adjust clock increment rate
1527  * @info: the driver's PTP info structure
1528  * @scaled_ppm: Parts per million with 16-bit fractional field
1529  *
1530  * Adjust the frequency of the clock by the indicated scaled ppm from the
1531  * base frequency.
1532  */
1533 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1534 {
1535 	struct ice_pf *pf = ptp_info_to_pf(info);
1536 	struct ice_hw *hw = &pf->hw;
1537 	u64 incval;
1538 	int err;
1539 
1540 	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
1541 	err = ice_ptp_write_incval_locked(hw, incval);
1542 	if (err) {
1543 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1544 			err);
1545 		return -EIO;
1546 	}
1547 
1548 	return 0;
1549 }
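adjust_by_scaled_ppm() scales the base increment by scaled_ppm, which is parts per million with a 16-bit fractional field (one ppm equals 65536). A rough userspace approximation of that scaling, not the kernel helper itself (illustrative only; uses the GCC/Clang __int128 extension):

#include <stdint.h>
#include <stdio.h>

static uint64_t scale_incval(uint64_t base, long scaled_ppm)
{
	/* base + base * scaled_ppm / (1,000,000 * 2^16) */
	return base + (uint64_t)((__int128)base * scaled_ppm /
				 (1000000LL * 65536));
}

int main(void)
{
	uint64_t base = 0x100000000ULL;	/* example nominal increment value */

	/* Speed the clock up by 10 ppm */
	printf("0x%llx\n", (unsigned long long)scale_incval(base, 10 * 65536));
	return 0;
}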
1550 
1551 /**
1552  * ice_ptp_extts_event - Process PTP external clock event
1553  * @pf: Board private structure
1554  */
1555 void ice_ptp_extts_event(struct ice_pf *pf)
1556 {
1557 	struct ptp_clock_event event;
1558 	struct ice_hw *hw = &pf->hw;
1559 	u8 chan, tmr_idx;
1560 	u32 hi, lo;
1561 
1562 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1563 	/* Event time is captured by one of the two matched registers
1564 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
1565 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
1566 	 * Event is defined in GLTSYN_EVNT_0 register
1567 	 */
1568 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1569 		/* Check if channel is enabled */
1570 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
1571 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1572 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1573 			event.timestamp = (((u64)hi) << 32) | lo;
1574 			event.type = PTP_CLOCK_EXTTS;
1575 			event.index = chan;
1576 
1577 			/* Fire event */
1578 			ptp_clock_event(pf->ptp.clock, &event);
1579 			pf->ptp.ext_ts_irq &= ~(1 << chan);
1580 		}
1581 	}
1582 }
1583 
1584 /**
1585  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1586  * @pf: Board private structure
1587  * @ena: true to enable; false to disable
1588  * @chan: GPIO channel (0-3)
1589  * @gpio_pin: GPIO pin
1590  * @extts_flags: request flags from the ptp_extts_request.flags
1591  */
1592 static int
1593 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
1594 		  unsigned int extts_flags)
1595 {
1596 	u32 func, aux_reg, gpio_reg, irq_reg;
1597 	struct ice_hw *hw = &pf->hw;
1598 	u8 tmr_idx;
1599 
1600 	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
1601 		return -EINVAL;
1602 
1603 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1604 
1605 	irq_reg = rd32(hw, PFINT_OICR_ENA);
1606 
1607 	if (ena) {
1608 		/* Enable the interrupt */
1609 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1610 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1611 
1612 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
1613 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
1614 
1615 		/* set event level to requested edge */
1616 		if (extts_flags & PTP_FALLING_EDGE)
1617 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1618 		if (extts_flags & PTP_RISING_EDGE)
1619 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1620 
1621 		/* Write GPIO CTL reg.
1622 		 * 0x1 is input sampled by EVENT register(channel)
1623 		 * + num_in_channels * tmr_idx
1624 		 */
1625 		func = 1 + chan + (tmr_idx * 3);
1626 		gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1627 		pf->ptp.ext_ts_chan |= (1 << chan);
1628 	} else {
1629 		/* clear the values we set to reset defaults */
1630 		aux_reg = 0;
1631 		gpio_reg = 0;
1632 		pf->ptp.ext_ts_chan &= ~(1 << chan);
1633 		if (!pf->ptp.ext_ts_chan)
1634 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1635 	}
1636 
1637 	wr32(hw, PFINT_OICR_ENA, irq_reg);
1638 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1639 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1640 
1641 	return 0;
1642 }
1643 
1644 /**
1645  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1646  * @pf: Board private structure
1647  * @chan: GPIO channel (0-3)
1648  * @config: desired periodic clk configuration. NULL will disable channel
1649  * @store: If set to true the values will be stored
1650  *
1651  * Configure the internal clock generator modules to generate the clock wave of
1652  * specified period.
1653  */
1654 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
1655 			      struct ice_perout_channel *config, bool store)
1656 {
1657 	u64 current_time, period, start_time, phase;
1658 	struct ice_hw *hw = &pf->hw;
1659 	u32 func, val, gpio_pin;
1660 	u8 tmr_idx;
1661 
1662 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1663 
1664 	/* 0. Reset mode & out_en in AUX_OUT */
1665 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
1666 
1667 	/* If we're disabling the output, clear out CLKO and TGT and keep
1668 	 * output level low
1669 	 */
1670 	if (!config || !config->ena) {
1671 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
1672 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
1673 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
1674 
1675 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
1676 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
1677 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1678 
1679 		/* Store the value if requested */
1680 		if (store)
1681 			memset(&pf->ptp.perout_channels[chan], 0,
1682 			       sizeof(struct ice_perout_channel));
1683 
1684 		return 0;
1685 	}
1686 	period = config->period;
1687 	start_time = config->start_time;
1688 	div64_u64_rem(start_time, period, &phase);
1689 	gpio_pin = config->gpio_pin;
1690 
1691 	/* 1. Write clkout with half of required period value */
1692 	if (period & 0x1) {
1693 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
1694 		goto err;
1695 	}
1696 
1697 	period >>= 1;
1698 
1699 	/* For proper operation, GLTSYN_CLKO must be larger than one clock tick
1700 	 */
1701 #define MIN_PULSE 3
1702 	if (period <= MIN_PULSE || period > U32_MAX) {
1703 		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d and < 2^33",
1704 			MIN_PULSE * 2);
1705 		goto err;
1706 	}
1707 
1708 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
1709 
1710 	/* Allow time for programming before start_time is hit */
1711 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
1712 
1713 	/* If the start time is in the past, start the timer at the next
1714 	 * whole second, maintaining phase
1715 	 */
1716 	if (start_time < current_time)
1717 		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
1718 				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
1719 
1720 	if (ice_is_e810(hw))
1721 		start_time -= E810_OUT_PROP_DELAY_NS;
1722 	else
1723 		start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
1724 
1725 	/* 2. Write TARGET time */
1726 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
1727 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
1728 
1729 	/* 3. Write AUX_OUT register */
1730 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
1731 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
1732 
1733 	/* 4. write GPIO CTL reg */
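	/* Output pin functions start at 8: one per channel, with four
	 * channels per timer (hence tmr_idx * 4)
	 */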
1734 	func = 8 + chan + (tmr_idx * 4);
1735 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
1736 	      FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
1737 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
1738 
1739 	/* Store the value if requested */
1740 	if (store) {
1741 		memcpy(&pf->ptp.perout_channels[chan], config,
1742 		       sizeof(struct ice_perout_channel));
1743 		pf->ptp.perout_channels[chan].start_time = phase;
1744 	}
1745 
1746 	return 0;
1747 err:
1748 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
1749 	return -EFAULT;
1750 }
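
/* Worked example with illustrative numbers: a 1 Hz output requested with
 * period = 1000000000 ns and start_time = X.250000000 s, where X is already
 * in the past, gives phase = 250000000 ns. The period is halved to
 * 500000000 ns and written to GLTSYN_CLKO (the length of each half cycle).
 * start_time is then moved to the next whole second after current_time plus
 * the 250000000 ns phase, reduced by the output propagation delay
 * (E810_OUT_PROP_DELAY_NS on E810), and written to GLTSYN_TGT_L/H.
 */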
1751 
1752 /**
1753  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1754  * @pf: pointer to the PF structure
1755  *
1756  * Disable all currently configured clock outputs. This is necessary before
1757  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1758  * re-enable the clocks again.
1759  */
1760 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1761 {
1762 	uint i;
1763 
1764 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1765 		if (pf->ptp.perout_channels[i].ena)
1766 			ice_ptp_cfg_clkout(pf, i, NULL, false);
1767 }
1768 
1769 /**
1770  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1771  * @pf: pointer to the PF structure
1772  *
1773  * Enable all currently configured clock outputs. Use this after
1774  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1775  * their configuration.
1776  */
1777 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1778 {
1779 	uint i;
1780 
1781 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
1782 		if (pf->ptp.perout_channels[i].ena)
1783 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1784 					   false);
1785 }
1786 
1787 /**
1788  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1789  * @info: the driver's PTP info structure
1790  * @rq: The requested feature to change
1791  * @on: Enable/disable flag
1792  */
1793 static int
1794 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1795 			 struct ptp_clock_request *rq, int on)
1796 {
1797 	struct ice_pf *pf = ptp_info_to_pf(info);
1798 	struct ice_perout_channel clk_cfg = {0};
1799 	bool sma_pres = false;
1800 	unsigned int chan;
1801 	u32 gpio_pin;
1802 	int err;
1803 
1804 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1805 		sma_pres = true;
1806 
1807 	switch (rq->type) {
1808 	case PTP_CLK_REQ_PEROUT:
1809 		chan = rq->perout.index;
1810 		if (sma_pres) {
1811 			if (chan == ice_pin_desc_e810t[SMA1].chan)
1812 				clk_cfg.gpio_pin = GPIO_20;
1813 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
1814 				clk_cfg.gpio_pin = GPIO_22;
1815 			else
1816 				return -1;
1817 		} else if (ice_is_e810t(&pf->hw)) {
1818 			if (chan == 0)
1819 				clk_cfg.gpio_pin = GPIO_20;
1820 			else
1821 				clk_cfg.gpio_pin = GPIO_22;
1822 		} else if (chan == PPS_CLK_GEN_CHAN) {
1823 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1824 		} else {
1825 			clk_cfg.gpio_pin = chan;
1826 		}
1827 
1828 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1829 				   rq->perout.period.nsec);
1830 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1831 				       rq->perout.start.nsec);
1832 		clk_cfg.ena = !!on;
1833 
1834 		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1835 		break;
1836 	case PTP_CLK_REQ_EXTTS:
1837 		chan = rq->extts.index;
1838 		if (sma_pres) {
1839 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1840 				gpio_pin = GPIO_21;
1841 			else
1842 				gpio_pin = GPIO_23;
1843 		} else if (ice_is_e810t(&pf->hw)) {
1844 			if (chan == 0)
1845 				gpio_pin = GPIO_21;
1846 			else
1847 				gpio_pin = GPIO_23;
1848 		} else {
1849 			gpio_pin = chan;
1850 		}
1851 
1852 		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
1853 					rq->extts.flags);
1854 		break;
1855 	default:
1856 		return -EOPNOTSUPP;
1857 	}
1858 
1859 	return err;
1860 }
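
/* Illustrative only, not part of the driver: a minimal userspace sketch of a
 * periodic output request that reaches ice_ptp_gpio_enable_e810() via the
 * PTP core. The device path, channel 0 and the 1 Hz period are assumptions
 * made for the example.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ptp_clock.h>
 *
 *	int start_1hz_output(const char *path)
 *	{
 *		struct ptp_perout_request req;
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.index = 0;		// periodic output channel
 *		req.period.sec = 1;	// 1 Hz square wave
 *		req.period.nsec = 0;
 *		req.start.sec = 0;	// in the past: the driver rounds up to
 *		req.start.nsec = 0;	// the next second (see ice_ptp_cfg_clkout)
 *		return ioctl(fd, PTP_PEROUT_REQUEST, &req);
 *	}
 */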
1861 
1862 /**
1863  * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
1864  * @info: the driver's PTP info structure
1865  * @rq: The requested feature to change
1866  * @on: Enable/disable flag
1867  */
1868 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
1869 				    struct ptp_clock_request *rq, int on)
1870 {
1871 	struct ice_pf *pf = ptp_info_to_pf(info);
1872 	struct ice_perout_channel clk_cfg = {0};
1873 	int err;
1874 
1875 	switch (rq->type) {
1876 	case PTP_CLK_REQ_PPS:
1877 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
1878 		clk_cfg.period = NSEC_PER_SEC;
1879 		clk_cfg.ena = !!on;
1880 
1881 		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
1882 		break;
1883 	case PTP_CLK_REQ_EXTTS:
1884 		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
1885 					TIME_SYNC_PIN_INDEX, rq->extts.flags);
1886 		break;
1887 	default:
1888 		return -EOPNOTSUPP;
1889 	}
1890 
1891 	return err;
1892 }
1893 
1894 /**
1895  * ice_ptp_gettimex64 - Get the time of the clock
1896  * @info: the driver's PTP info structure
1897  * @ts: timespec64 structure to hold the current time value
1898  * @sts: Optional parameter for holding a pair of system timestamps from
1899  *       the system clock. Will be ignored if NULL is given.
1900  *
1901  * Read the device clock and return its value in ns, after converting it
1902  * into a timespec64 struct.
1903  */
1904 static int
1905 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1906 		   struct ptp_system_timestamp *sts)
1907 {
1908 	struct ice_pf *pf = ptp_info_to_pf(info);
1909 	u64 time_ns;
1910 
1911 	time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1912 	*ts = ns_to_timespec64(time_ns);
1913 	return 0;
1914 }
1915 
1916 /**
1917  * ice_ptp_settime64 - Set the time of the clock
1918  * @info: the driver's PTP info structure
1919  * @ts: timespec64 structure that holds the new time value
1920  *
1921  * Set the device clock to the user input value. The conversion from timespec
1922  * to ns happens in the write function.
1923  */
1924 static int
1925 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1926 {
1927 	struct ice_pf *pf = ptp_info_to_pf(info);
1928 	struct timespec64 ts64 = *ts;
1929 	struct ice_hw *hw = &pf->hw;
1930 	int err;
1931 
1932 	/* For Vernier mode, we need to recalibrate after new settime
1933 	 * Start with disabling timestamp block
1934 	 */
1935 	if (pf->ptp.port.link_up)
1936 		ice_ptp_port_phy_stop(&pf->ptp.port);
1937 
1938 	if (!ice_ptp_lock(hw)) {
1939 		err = -EBUSY;
1940 		goto exit;
1941 	}
1942 
1943 	/* Disable periodic outputs */
1944 	ice_ptp_disable_all_clkout(pf);
1945 
1946 	err = ice_ptp_write_init(pf, &ts64);
1947 	ice_ptp_unlock(hw);
1948 
1949 	if (!err)
1950 		ice_ptp_reset_cached_phctime(pf);
1951 
1952 	/* Reenable periodic outputs */
1953 	ice_ptp_enable_all_clkout(pf);
1954 
1955 	/* Recalibrate and re-enable timestamp blocks for E822/E823 */
1956 	if (hw->phy_model == ICE_PHY_E82X)
1957 		ice_ptp_restart_all_phy(pf);
1958 exit:
1959 	if (err) {
1960 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
1961 		return err;
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 /**
1968  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1969  * @info: the driver's PTP info structure
1970  * @delta: Offset in nanoseconds to adjust the time by
1971  */
1972 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1973 {
1974 	struct timespec64 now, then;
1975 	int ret;
1976 
1977 	then = ns_to_timespec64(delta);
1978 	ret = ice_ptp_gettimex64(info, &now, NULL);
1979 	if (ret)
1980 		return ret;
1981 	now = timespec64_add(now, then);
1982 
1983 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1984 }
1985 
1986 /**
1987  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1988  * @info: the driver's PTP info structure
1989  * @delta: Offset in nanoseconds to adjust the time by
1990  */
1991 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
1992 {
1993 	struct ice_pf *pf = ptp_info_to_pf(info);
1994 	struct ice_hw *hw = &pf->hw;
1995 	struct device *dev;
1996 	int err;
1997 
1998 	dev = ice_pf_to_dev(pf);
1999 
2000 	/* Hardware only supports atomic adjustments using signed 32-bit
2001 	 * integers. For any adjustment outside this range, perform
2002 	 * a non-atomic get->adjust->set flow.
2003 	 */
2004 	if (delta > S32_MAX || delta < S32_MIN) {
2005 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
2006 		return ice_ptp_adjtime_nonatomic(info, delta);
2007 	}
2008 
2009 	if (!ice_ptp_lock(hw)) {
2010 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
2011 		return -EBUSY;
2012 	}
2013 
2014 	/* Disable periodic outputs */
2015 	ice_ptp_disable_all_clkout(pf);
2016 
2017 	err = ice_ptp_write_adj(pf, delta);
2018 
2019 	/* Reenable periodic outputs */
2020 	ice_ptp_enable_all_clkout(pf);
2021 
2022 	ice_ptp_unlock(hw);
2023 
2024 	if (err) {
2025 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
2026 		return err;
2027 	}
2028 
2029 	ice_ptp_reset_cached_phctime(pf);
2030 
2031 	return 0;
2032 }
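
/* Illustrative only, not part of the driver: ice_ptp_adjtime() and
 * ice_ptp_settime64() are reached from userspace through the dynamic posix
 * clock of the PHC. FD_TO_CLOCKID is not exported by a uapi header; the
 * definition below mirrors tools/testing/selftests/ptp/testptp.c. The sketch
 * assumes a non-negative offset.
 *
 *	#include <fcntl.h>
 *	#include <time.h>
 *	#include <sys/timex.h>
 *
 *	#define CLOCKFD 3
 *	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int shift_phc(const char *path, long long offset_ns)
 *	{
 *		struct timex tx = { 0 };
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		tx.modes = ADJ_SETOFFSET | ADJ_NANO;
 *		tx.time.tv_sec = offset_ns / 1000000000LL;
 *		tx.time.tv_usec = offset_ns % 1000000000LL;	// ns with ADJ_NANO
 *		return clock_adjtime(FD_TO_CLOCKID(fd), &tx);
 *	}
 */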
2033 
2034 #ifdef CONFIG_ICE_HWTS
2035 /**
2036  * ice_ptp_get_syncdevicetime - Get the cross time stamp info
2037  * @device: Current device time
2038  * @system: System counter value read synchronously with device time
2039  * @ctx: Context provided by timekeeping code
2040  *
2041  * Read device and system (ART) clock simultaneously and return the corrected
2042  * clock values in ns.
2043  */
2044 static int
2045 ice_ptp_get_syncdevicetime(ktime_t *device,
2046 			   struct system_counterval_t *system,
2047 			   void *ctx)
2048 {
2049 	struct ice_pf *pf = (struct ice_pf *)ctx;
2050 	struct ice_hw *hw = &pf->hw;
2051 	u32 hh_lock, hh_art_ctl;
2052 	int i;
2053 
2054 #define MAX_HH_HW_LOCK_TRIES	5
2055 #define MAX_HH_CTL_LOCK_TRIES	100
2056 
2057 	for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
2058 		/* Get the HW lock */
2059 		hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2060 		if (hh_lock & PFHH_SEM_BUSY_M) {
2061 			usleep_range(10000, 15000);
2062 			continue;
2063 		}
2064 		break;
2065 	}
2066 	if (hh_lock & PFHH_SEM_BUSY_M) {
2067 		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
2068 		return -EBUSY;
2069 	}
2070 
2071 	/* Program cmd to master timer */
2072 	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2073 
2074 	/* Start the ART and device clock sync sequence */
2075 	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2076 	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
2077 	wr32(hw, GLHH_ART_CTL, hh_art_ctl);
2078 
2079 	for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
2080 		/* Wait for sync to complete */
2081 		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
2082 		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
2083 			udelay(1);
2084 			continue;
2085 		} else {
2086 			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
2087 			u64 hh_ts;
2088 
2089 			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
2090 			/* Read ART time */
2091 			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
2092 			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
2093 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2094 			*system = convert_art_ns_to_tsc(hh_ts);
2095 			/* Read Device source clock time */
2096 			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
2097 			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
2098 			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
2099 			*device = ns_to_ktime(hh_ts);
2100 			break;
2101 		}
2102 	}
2103 
2104 	/* Clear the master timer */
2105 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2106 
2107 	/* Release HW lock */
2108 	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2109 	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
2110 	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
2111 
2112 	if (i == MAX_HH_CTL_LOCK_TRIES)
2113 		return -ETIMEDOUT;
2114 
2115 	return 0;
2116 }
2117 
2118 /**
2119  * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
2120  * @info: the driver's PTP info structure
2121  * @cts: The memory to fill the cross timestamp info
2122  *
2123  * Capture a cross timestamp between the ART and the device PTP hardware
2124  * clock. Fill the cross timestamp information and report it back to the
2125  * caller.
2126  *
2127  * This is only valid for E822 and E823 devices which have support for
2128  * generating the cross timestamp via PCIe PTM.
2129  *
2130  * In order to correctly correlate the ART timestamp back to the TSC time, the
2131  * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
2132  */
2133 static int
2134 ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
2135 			    struct system_device_crosststamp *cts)
2136 {
2137 	struct ice_pf *pf = ptp_info_to_pf(info);
2138 
2139 	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
2140 					     pf, NULL, cts);
2141 }
2142 #endif /* CONFIG_ICE_HWTS */
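
/* Illustrative only, not part of the driver: the cross timestamp above is
 * exposed to userspace through the PTP_SYS_OFFSET_PRECISE ioctl. The device
 * path is an assumption made for the example.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ptp_clock.h>
 *
 *	int read_crosststamp(void)
 *	{
 *		struct ptp_sys_offset_precise off = { 0 };
 *		int fd = open("/dev/ptp0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
 *			return -1;
 *
 *		printf("device %lld.%09u realtime %lld.%09u\n",
 *		       (long long)off.device.sec, off.device.nsec,
 *		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
 *		return 0;
 *	}
 */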
2143 
2144 /**
2145  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2146  * @pf: Board private structure
2147  * @ifr: ioctl data
2148  *
2149  * Copy the timestamping config to user buffer
2150  */
2151 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2152 {
2153 	struct hwtstamp_config *config;
2154 
2155 	if (pf->ptp.state != ICE_PTP_READY)
2156 		return -EIO;
2157 
2158 	config = &pf->ptp.tstamp_config;
2159 
2160 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2161 		-EFAULT : 0;
2162 }
2163 
2164 /**
2165  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2166  * @pf: Board private structure
2167  * @config: hwtstamp settings requested or saved
2168  */
2169 static int
2170 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2171 {
2172 	switch (config->tx_type) {
2173 	case HWTSTAMP_TX_OFF:
2174 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
2175 		break;
2176 	case HWTSTAMP_TX_ON:
2177 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
2178 		break;
2179 	default:
2180 		return -ERANGE;
2181 	}
2182 
2183 	switch (config->rx_filter) {
2184 	case HWTSTAMP_FILTER_NONE:
2185 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2186 		break;
2187 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2188 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2189 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2190 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2191 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2192 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2193 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2194 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2195 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2196 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2197 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2198 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2199 	case HWTSTAMP_FILTER_NTP_ALL:
2200 	case HWTSTAMP_FILTER_ALL:
2201 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
2202 		break;
2203 	default:
2204 		return -ERANGE;
2205 	}
2206 
2207 	/* Immediately update the device timestamping mode */
2208 	ice_ptp_restore_timestamp_mode(pf);
2209 
2210 	return 0;
2211 }
2212 
2213 /**
2214  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2215  * @pf: Board private structure
2216  * @ifr: ioctl data
2217  *
2218  * Get the user config and store it
2219  */
2220 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2221 {
2222 	struct hwtstamp_config config;
2223 	int err;
2224 
2225 	if (pf->ptp.state != ICE_PTP_READY)
2226 		return -EAGAIN;
2227 
2228 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2229 		return -EFAULT;
2230 
2231 	err = ice_ptp_set_timestamp_mode(pf, &config);
2232 	if (err)
2233 		return err;
2234 
2235 	/* Return the actual configuration set */
2236 	config = pf->ptp.tstamp_config;
2237 
2238 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2239 		-EFAULT : 0;
2240 }
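
/* Illustrative only, not part of the driver: a userspace sketch of the
 * SIOCSHWTSTAMP ioctl that lands in ice_ptp_set_ts_config(). The interface
 * name is an assumption made for the example.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int enable_hwtstamp(int sock)
 *	{
 *		struct hwtstamp_config cfg;
 *		struct ifreq ifr;
 *
 *		memset(&cfg, 0, sizeof(cfg));
 *		cfg.tx_type = HWTSTAMP_TX_ON;
 *		// the driver reports HWTSTAMP_FILTER_ALL back for this filter
 *		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 */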
2241 
2242 /**
2243  * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
2244  * @rx_desc: Receive descriptor
2245  * @pkt_ctx: Packet context to get the cached time
2246  *
2247  * The Rx timestamp is delivered to the driver in the receive descriptor.
2248  */
2249 u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
2250 			const struct ice_pkt_ctx *pkt_ctx)
2251 {
2252 	u64 ts_ns, cached_time;
2253 	u32 ts_high;
2254 
2255 	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2256 		return 0;
2257 
2258 	cached_time = READ_ONCE(pkt_ctx->cached_phctime);
2259 
2260 	/* Do not report a timestamp if we don't have a cached PHC time */
2261 	if (!cached_time)
2262 		return 0;
2263 
2264 	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2265 	 * PHC value, rather than accessing the PF. This also allows us to
2266  * simply pass the upper 32 bits of nanoseconds directly. Calling
2267 	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2268 	 * bits itself.
2269 	 */
2270 	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2271 	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2272 
2273 	return ts_ns;
2274 }
2275 
2276 /**
2277  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2278  * @pf: pointer to the PF structure
2279  * @info: PTP clock info structure
2280  *
2281  * Disable the OS access to the SMA pins. Called to clear out the OS
2282  * indications of pin support when we fail to setup the E810-T SMA control
2283  * register.
2284  */
2285 static void
2286 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2287 {
2288 	struct device *dev = ice_pf_to_dev(pf);
2289 
2290 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2291 
2292 	info->enable = NULL;
2293 	info->verify = NULL;
2294 	info->n_pins = 0;
2295 	info->n_ext_ts = 0;
2296 	info->n_per_out = 0;
2297 }
2298 
2299 /**
2300  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2301  * @pf: pointer to the PF structure
2302  * @info: PTP clock info structure
2303  *
2304  * Finish setting up the SMA pins by allocating pin_config, and setting it up
2305  * according to the current status of the SMA. On failure, disable all of the
2306  * extended SMA pin support.
2307  */
2308 static void
2309 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2310 {
2311 	struct device *dev = ice_pf_to_dev(pf);
2312 	int err;
2313 
2314 	/* Allocate memory for kernel pins interface */
2315 	info->pin_config = devm_kcalloc(dev, info->n_pins,
2316 					sizeof(*info->pin_config), GFP_KERNEL);
2317 	if (!info->pin_config) {
2318 		ice_ptp_disable_sma_pins_e810t(pf, info);
2319 		return;
2320 	}
2321 
2322 	/* Read current SMA status */
2323 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2324 	if (err)
2325 		ice_ptp_disable_sma_pins_e810t(pf, info);
2326 }
2327 
2328 /**
2329  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2330  * @pf: pointer to the PF instance
2331  * @info: PTP clock capabilities
2332  */
2333 static void
2334 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2335 {
2336 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2337 		info->n_ext_ts = N_EXT_TS_E810;
2338 		info->n_per_out = N_PER_OUT_E810T;
2339 		info->n_pins = NUM_PTP_PINS_E810T;
2340 		info->verify = ice_verify_pin_e810t;
2341 
2342 		/* Complete setup of the SMA pins */
2343 		ice_ptp_setup_sma_pins_e810t(pf, info);
2344 	} else if (ice_is_e810t(&pf->hw)) {
2345 		info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
2346 		info->n_per_out = N_PER_OUT_NO_SMA_E810T;
2347 	} else {
2348 		info->n_per_out = N_PER_OUT_E810;
2349 		info->n_ext_ts = N_EXT_TS_E810;
2350 	}
2351 }
2352 
2353 /**
2354  * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
2355  * @pf: pointer to the PF instance
2356  * @info: PTP clock capabilities
2357  */
2358 static void
2359 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2360 {
2361 	info->pps = 1;
2362 	info->n_per_out = 0;
2363 	info->n_ext_ts = 1;
2364 }
2365 
2366 /**
2367  * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
2368  * @pf: Board private structure
2369  * @info: PTP info to fill
2370  *
2371  * Assign functions to the PTP capabilities structure for E82x devices.
2372  * Functions which operate across all device families should be set directly
2373  * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
2374  * devices.
2375  */
2376 static void
2377 ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
2378 {
2379 #ifdef CONFIG_ICE_HWTS
2380 	if (boot_cpu_has(X86_FEATURE_ART) &&
2381 	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
2382 		info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
2383 #endif /* CONFIG_ICE_HWTS */
2384 }
2385 
2386 /**
2387  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
2388  * @pf: Board private structure
2389  * @info: PTP info to fill
2390  *
2391  * Assign functions to the PTP capabilities structure for E810 devices.
2392  * Functions which operate across all device families should be set directly
2393  * in ice_ptp_set_caps. Only add functions here which are distinct for e810
2394  * devices.
2395  */
2396 static void
2397 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2398 {
2399 	info->enable = ice_ptp_gpio_enable_e810;
2400 	ice_ptp_setup_pins_e810(pf, info);
2401 }
2402 
2403 /**
2404  * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
2405  * @pf: Board private structure
2406  * @info: PTP info to fill
2407  *
2408  * Assign functions to the PTP capabilities structure for E823 devices.
2409  * Functions which operate across all device families should be set directly
2410  * in ice_ptp_set_caps. Only add functions here which are distinct for e823
2411  * devices.
2412  */
2413 static void
2414 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
2415 {
2416 	ice_ptp_set_funcs_e82x(pf, info);
2417 
2418 	info->enable = ice_ptp_gpio_enable_e823;
2419 	ice_ptp_setup_pins_e823(pf, info);
2420 }
2421 
2422 /**
2423  * ice_ptp_set_caps - Set PTP capabilities
2424  * @pf: Board private structure
2425  */
2426 static void ice_ptp_set_caps(struct ice_pf *pf)
2427 {
2428 	struct ptp_clock_info *info = &pf->ptp.info;
2429 	struct device *dev = ice_pf_to_dev(pf);
2430 
2431 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2432 		 dev_driver_string(dev), dev_name(dev));
2433 	info->owner = THIS_MODULE;
2434 	info->max_adj = 100000000;
2435 	info->adjtime = ice_ptp_adjtime;
2436 	info->adjfine = ice_ptp_adjfine;
2437 	info->gettimex64 = ice_ptp_gettimex64;
2438 	info->settime64 = ice_ptp_settime64;
2439 
2440 	if (ice_is_e810(&pf->hw))
2441 		ice_ptp_set_funcs_e810(pf, info);
2442 	else if (ice_is_e823(&pf->hw))
2443 		ice_ptp_set_funcs_e823(pf, info);
2444 	else
2445 		ice_ptp_set_funcs_e82x(pf, info);
2446 }
2447 
2448 /**
2449  * ice_ptp_create_clock - Create PTP clock device for userspace
2450  * @pf: Board private structure
2451  *
2452  * This function creates a new PTP clock device if one does not already
2453  * exist. It returns an error if the device cannot be created, and success
2454  * if a clock device is already present. Used by ice_ptp_init to create the
2455  * clock initially, and to prevent global resets from creating new devices.
2456  */
2457 static long ice_ptp_create_clock(struct ice_pf *pf)
2458 {
2459 	struct ptp_clock_info *info;
2460 	struct device *dev;
2461 
2462 	/* No need to create a clock device if we already have one */
2463 	if (pf->ptp.clock)
2464 		return 0;
2465 
2466 	ice_ptp_set_caps(pf);
2467 
2468 	info = &pf->ptp.info;
2469 	dev = ice_pf_to_dev(pf);
2470 
2471 	/* Attempt to register the clock before enabling the hardware. */
2472 	pf->ptp.clock = ptp_clock_register(info, dev);
2473 	if (IS_ERR(pf->ptp.clock)) {
2474 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
2475 		return PTR_ERR(pf->ptp.clock);
2476 	}
2477 
2478 	return 0;
2479 }
2480 
2481 /**
2482  * ice_ptp_request_ts - Request an available Tx timestamp index
2483  * @tx: the PTP Tx timestamp tracker to request from
2484  * @skb: the SKB to associate with this timestamp request
2485  */
2486 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2487 {
2488 	unsigned long flags;
2489 	u8 idx;
2490 
2491 	spin_lock_irqsave(&tx->lock, flags);
2492 
2493 	/* Check that this tracker is accepting new timestamp requests */
2494 	if (!ice_ptp_is_tx_tracker_up(tx)) {
2495 		spin_unlock_irqrestore(&tx->lock, flags);
2496 		return -1;
2497 	}
2498 
2499 	/* Find and set the first available index */
2500 	idx = find_next_zero_bit(tx->in_use, tx->len,
2501 				 tx->last_ll_ts_idx_read + 1);
2502 	if (idx == tx->len)
2503 		idx = find_first_zero_bit(tx->in_use, tx->len);
2504 
2505 	if (idx < tx->len) {
2506 		/* We got a valid index that no other thread could have set. Store
2507 		 * a reference to the skb and the start time to allow discarding old
2508 		 * requests.
2509 		 */
2510 		set_bit(idx, tx->in_use);
2511 		clear_bit(idx, tx->stale);
2512 		tx->tstamps[idx].start = jiffies;
2513 		tx->tstamps[idx].skb = skb_get(skb);
2514 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2515 		ice_trace(tx_tstamp_request, skb, idx);
2516 	}
2517 
2518 	spin_unlock_irqrestore(&tx->lock, flags);
2519 
2520 	/* return the appropriate PHY timestamp register index, -1 if no
2521 	 * indexes were available.
2522 	 */
2523 	if (idx >= tx->len)
2524 		return -1;
2525 	else
2526 		return idx + tx->offset;
2527 }
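
/* Illustrative only, not part of the driver: Tx timestamp requests originate
 * from sockets that enable hardware timestamping; completed timestamps are
 * returned on the socket error queue. A minimal sketch of the socket option:
 *
 *	#include <sys/socket.h>
 *	#include <linux/net_tstamp.h>
 *
 *	int enable_so_timestamping(int sock)
 *	{
 *		int flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *			    SOF_TIMESTAMPING_RX_HARDWARE |
 *			    SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *		// Completed Tx timestamps are read back with
 *		// recvmsg(sock, ..., MSG_ERRQUEUE) as SCM_TIMESTAMPING cmsgs.
 *		return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
 *				  &flags, sizeof(flags));
 *	}
 */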
2528 
2529 /**
2530  * ice_ptp_process_ts - Process the PTP Tx timestamps
2531  * @pf: Board private structure
2532  *
2533  * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
2534  * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
2535  */
2536 enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
2537 {
2538 	switch (pf->ptp.tx_interrupt_mode) {
2539 	case ICE_PTP_TX_INTERRUPT_NONE:
2540 		/* This device has the clock owner handle timestamps for it */
2541 		return ICE_TX_TSTAMP_WORK_DONE;
2542 	case ICE_PTP_TX_INTERRUPT_SELF:
2543 		/* This device handles its own timestamps */
2544 		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
2545 	case ICE_PTP_TX_INTERRUPT_ALL:
2546 		/* This device handles timestamps for all ports */
2547 		return ice_ptp_tx_tstamp_owner(pf);
2548 	default:
2549 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
2550 			  pf->ptp.tx_interrupt_mode);
2551 		return ICE_TX_TSTAMP_WORK_DONE;
2552 	}
2553 }
2554 
2555 /**
2556  * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
2557  * @pf: Board private structure
2558  *
2559  * The device PHY issues Tx timestamp interrupts to the driver for processing
2560  * timestamp data from the PHY. It will not interrupt again until all
2561  * current timestamp data is read. In rare circumstances, it is possible that
2562  * the driver fails to read all outstanding data.
2563  *
2564  * To avoid getting permanently stuck, periodically check if the PHY has
2565  * outstanding timestamp data. If so, trigger an interrupt from software to
2566  * process this data.
2567  */
2568 static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
2569 {
2570 	struct device *dev = ice_pf_to_dev(pf);
2571 	struct ice_hw *hw = &pf->hw;
2572 	bool trigger_oicr = false;
2573 	unsigned int i;
2574 
2575 	if (ice_is_e810(hw))
2576 		return;
2577 
2578 	if (!ice_pf_src_tmr_owned(pf))
2579 		return;
2580 
2581 	for (i = 0; i < ICE_MAX_QUAD; i++) {
2582 		u64 tstamp_ready;
2583 		int err;
2584 
2585 		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
2586 		if (!err && tstamp_ready) {
2587 			trigger_oicr = true;
2588 			break;
2589 		}
2590 	}
2591 
2592 	if (trigger_oicr) {
2593 		/* Trigger a software interrupt, to ensure this data
2594 		 * gets processed.
2595 		 */
2596 		dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
2597 
2598 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
2599 		ice_flush(hw);
2600 	}
2601 }
2602 
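/**
 * ice_ptp_periodic_work - Periodic maintenance work for the PTP clock
 * @work: the kthread delayed work structure
 *
 * Refresh the cached PHC time and, if needed, nudge the PHY Tx timestamp
 * interrupt, then reschedule itself.
 */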
2603 static void ice_ptp_periodic_work(struct kthread_work *work)
2604 {
2605 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2606 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2607 	int err;
2608 
2609 	if (pf->ptp.state != ICE_PTP_READY)
2610 		return;
2611 
2612 	err = ice_ptp_update_cached_phctime(pf);
2613 
2614 	ice_ptp_maybe_trigger_tx_interrupt(pf);
2615 
2616 	/* Run twice a second or reschedule if phc update failed */
2617 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2618 				   msecs_to_jiffies(err ? 10 : 500));
2619 }
2620 
2621 /**
2622  * ice_ptp_prepare_for_reset - Prepare PTP for reset
2623  * @pf: Board private structure
2624  * @reset_type: the reset type being performed
2625  */
2626 void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
2627 {
2628 	struct ice_ptp *ptp = &pf->ptp;
2629 	u8 src_tmr;
2630 
2631 	if (ptp->state != ICE_PTP_READY)
2632 		return;
2633 
2634 	ptp->state = ICE_PTP_RESETTING;
2635 
2636 	/* Disable timestamping for both Tx and Rx */
2637 	ice_ptp_disable_timestamp_mode(pf);
2638 
2639 	kthread_cancel_delayed_work_sync(&ptp->work);
2640 
2641 	if (reset_type == ICE_RESET_PFR)
2642 		return;
2643 
2644 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
2645 
2646 	/* Disable periodic outputs */
2647 	ice_ptp_disable_all_clkout(pf);
2648 
2649 	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
2650 
2651 	/* Disable source clock */
2652 	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
2653 
2654 	/* Acquire PHC and system timer to restore after reset */
2655 	ptp->reset_time = ktime_get_real_ns();
2656 }
2657 
2658 /**
2659  * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
2660  * @pf: Board private structure
2661  *
2662  * Companion function for ice_ptp_rebuild() which handles tasks that only the
2663  * PTP clock owner instance should perform.
2664  */
2665 static int ice_ptp_rebuild_owner(struct ice_pf *pf)
2666 {
2667 	struct ice_ptp *ptp = &pf->ptp;
2668 	struct ice_hw *hw = &pf->hw;
2669 	struct timespec64 ts;
2670 	u64 time_diff;
2671 	int err;
2672 
2673 	err = ice_ptp_init_phc(hw);
2674 	if (err)
2675 		return err;
2676 
2677 	/* Acquire the global hardware lock */
2678 	if (!ice_ptp_lock(hw)) {
2679 		err = -EBUSY;
2680 		return err;
2681 	}
2682 
2683 	/* Write the increment time value to PHY and LAN */
2684 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2685 	if (err) {
2686 		ice_ptp_unlock(hw);
2687 		return err;
2688 	}
2689 
2690 	/* Write the initial Time value to PHY and LAN using the cached PHC
2691 	 * time before the reset and time difference between stopping and
2692 	 * starting the clock.
2693 	 */
2694 	if (ptp->cached_phc_time) {
2695 		time_diff = ktime_get_real_ns() - ptp->reset_time;
2696 		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
2697 	} else {
2698 		ts = ktime_to_timespec64(ktime_get_real());
2699 	}
2700 	err = ice_ptp_write_init(pf, &ts);
2701 	if (err) {
2702 		ice_ptp_unlock(hw);
2703 		return err;
2704 	}
2705 
2706 	/* Release the global hardware lock */
2707 	ice_ptp_unlock(hw);
2708 
2709 	/* Flush software tracking of any outstanding timestamps since we're
2710 	 * about to flush the PHY timestamp block.
2711 	 */
2712 	ice_ptp_flush_all_tx_tracker(pf);
2713 
2714 	if (!ice_is_e810(hw)) {
2715 		/* Enable quad interrupts */
2716 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
2717 		if (err)
2718 			return err;
2719 
2720 		ice_ptp_restart_all_phy(pf);
2721 	}
2722 
2723 	return 0;
2724 }
2725 
2726 /**
2727  * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
2728  * @pf: Board private structure
2729  * @reset_type: the reset type being performed
2730  */
2731 void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
2732 {
2733 	struct ice_ptp *ptp = &pf->ptp;
2734 	int err;
2735 
2736 	if (ptp->state == ICE_PTP_READY) {
2737 		ice_ptp_prepare_for_reset(pf, reset_type);
2738 	} else if (ptp->state != ICE_PTP_RESETTING) {
2739 		err = -EINVAL;
2740 		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
2741 		goto err;
2742 	}
2743 
2744 	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
2745 		err = ice_ptp_rebuild_owner(pf);
2746 		if (err)
2747 			goto err;
2748 	}
2749 
2750 	ptp->state = ICE_PTP_READY;
2751 
2752 	/* Start periodic work going */
2753 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2754 
2755 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
2756 	return;
2757 
2758 err:
2759 	ptp->state = ICE_PTP_ERROR;
2760 	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
2761 }
2762 
2763 /**
2764  * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
2765  * @aux_dev: auxiliary device to get the auxiliary PF for
2766  */
2767 static struct ice_pf *
2768 ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
2769 {
2770 	struct ice_ptp_port *aux_port;
2771 	struct ice_ptp *aux_ptp;
2772 
2773 	aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
2774 	aux_ptp = container_of(aux_port, struct ice_ptp, port);
2775 
2776 	return container_of(aux_ptp, struct ice_pf, ptp);
2777 }
2778 
2779 /**
2780  * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
2781  * @aux_dev: auxiliary device to get the PF for
2782  */
2783 static struct ice_pf *
2784 ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
2785 {
2786 	struct ice_ptp_port_owner *ports_owner;
2787 	struct auxiliary_driver *aux_drv;
2788 	struct ice_ptp *owner_ptp;
2789 
2790 	if (!aux_dev->dev.driver)
2791 		return NULL;
2792 
2793 	aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
2794 	ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
2795 				   aux_driver);
2796 	owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
2797 	return container_of(owner_ptp, struct ice_pf, ptp);
2798 }
2799 
2800 /**
2801  * ice_ptp_auxbus_probe - Probe auxiliary devices
2802  * @aux_dev: PF's auxiliary device
2803  * @id: Auxiliary device ID
2804  */
2805 static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
2806 				const struct auxiliary_device_id *id)
2807 {
2808 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2809 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2810 
2811 	if (WARN_ON(!owner_pf))
2812 		return -ENODEV;
2813 
2814 	INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
2815 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2816 	list_add(&aux_pf->ptp.port.list_member,
2817 		 &owner_pf->ptp.ports_owner.ports);
2818 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2819 
2820 	return 0;
2821 }
2822 
2823 /**
2824  * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
2825  * @aux_dev: PF's auxiliary device
2826  */
2827 static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
2828 {
2829 	struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2830 	struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
2831 
2832 	mutex_lock(&owner_pf->ptp.ports_owner.lock);
2833 	list_del(&aux_pf->ptp.port.list_member);
2834 	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
2835 }
2836 
2837 /**
2838  * ice_ptp_auxbus_shutdown - Shutdown callback for auxiliary devices
2839  * @aux_dev: PF's auxiliary device
2840  */
2841 static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
2842 {
2843 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
2844 }
2845 
2846 /**
2847  * ice_ptp_auxbus_suspend - Suspend callback for auxiliary devices
2848  * @aux_dev: PF's auxiliary device
2849  * @state: power management state indicator
2850  */
2851 static int
2852 ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
2853 {
2854 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
2855 	return 0;
2856 }
2857 
2858 /**
2859  * ice_ptp_auxbus_resume - Resume callback for auxiliary devices
2860  * @aux_dev: PF's auxiliary device
2861  */
2862 static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
2863 {
2864 	/* Doing nothing here, but handle to auxbus driver must be satisfied */
2865 	return 0;
2866 }
2867 
2868 /**
2869  * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
2870  * @pf: Board private structure
2871  * @name: auxiliary bus driver name
2872  */
2873 static struct auxiliary_device_id *
2874 ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
2875 {
2876 	struct auxiliary_device_id *ids;
2877 
2878 	/* Second id left empty to terminate the array */
2879 	ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
2880 			   sizeof(struct auxiliary_device_id), GFP_KERNEL);
2881 	if (!ids)
2882 		return NULL;
2883 
2884 	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
2885 
2886 	return ids;
2887 }
2888 
2889 /**
2890  * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
2891  * @pf: Board private structure
2892  */
2893 static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
2894 {
2895 	struct auxiliary_driver *aux_driver;
2896 	struct ice_ptp *ptp;
2897 	struct device *dev;
2898 	char *name;
2899 	int err;
2900 
2901 	ptp = &pf->ptp;
2902 	dev = ice_pf_to_dev(pf);
2903 	aux_driver = &ptp->ports_owner.aux_driver;
2904 	INIT_LIST_HEAD(&ptp->ports_owner.ports);
2905 	mutex_init(&ptp->ports_owner.lock);
2906 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
2907 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
2908 			      ice_get_ptp_src_clock_index(&pf->hw));
2909 	if (!name)
2910 		return -ENOMEM;
2911 
2912 	aux_driver->name = name;
2913 	aux_driver->shutdown = ice_ptp_auxbus_shutdown;
2914 	aux_driver->suspend = ice_ptp_auxbus_suspend;
2915 	aux_driver->remove = ice_ptp_auxbus_remove;
2916 	aux_driver->resume = ice_ptp_auxbus_resume;
2917 	aux_driver->probe = ice_ptp_auxbus_probe;
2918 	aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
2919 	if (!aux_driver->id_table)
2920 		return -ENOMEM;
2921 
2922 	err = auxiliary_driver_register(aux_driver);
2923 	if (err) {
2924 		devm_kfree(dev, aux_driver->id_table);
2925 		dev_err(dev, "Failed registering aux_driver, name <%s>\n",
2926 			name);
2927 	}
2928 
2929 	return err;
2930 }
2931 
2932 /**
2933  * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
2934  * @pf: Board private structure
2935  */
2936 static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
2937 {
2938 	struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
2939 
2940 	auxiliary_driver_unregister(aux_driver);
2941 	devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
2942 
2943 	mutex_destroy(&pf->ptp.ports_owner.lock);
2944 }
2945 
2946 /**
2947  * ice_ptp_clock_index - Get the PTP clock index for this device
2948  * @pf: Board private structure
2949  *
2950  * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
2951  * is associated.
2952  */
2953 int ice_ptp_clock_index(struct ice_pf *pf)
2954 {
2955 	struct auxiliary_device *aux_dev;
2956 	struct ice_pf *owner_pf;
2957 	struct ptp_clock *clock;
2958 
2959 	aux_dev = &pf->ptp.port.aux_dev;
2960 	owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
2961 	if (!owner_pf)
2962 		return -1;
2963 	clock = owner_pf->ptp.clock;
2964 
2965 	return clock ? ptp_clock_index(clock) : -1;
2966 }
2967 
2968 /**
2969  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
2970  * @pf: Board private structure
2971  *
2972  * Setup and initialize a PTP clock device that represents the device hardware
2973  * clock. Save the clock index for other functions connected to the same
2974  * hardware resource.
2975  */
2976 static int ice_ptp_init_owner(struct ice_pf *pf)
2977 {
2978 	struct ice_hw *hw = &pf->hw;
2979 	struct timespec64 ts;
2980 	int err;
2981 
2982 	err = ice_ptp_init_phc(hw);
2983 	if (err) {
2984 		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
2985 			err);
2986 		return err;
2987 	}
2988 
2989 	/* Acquire the global hardware lock */
2990 	if (!ice_ptp_lock(hw)) {
2991 		err = -EBUSY;
2992 		goto err_exit;
2993 	}
2994 
2995 	/* Write the increment time value to PHY and LAN */
2996 	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
2997 	if (err) {
2998 		ice_ptp_unlock(hw);
2999 		goto err_exit;
3000 	}
3001 
3002 	ts = ktime_to_timespec64(ktime_get_real());
3003 	/* Write the initial Time value to PHY and LAN */
3004 	err = ice_ptp_write_init(pf, &ts);
3005 	if (err) {
3006 		ice_ptp_unlock(hw);
3007 		goto err_exit;
3008 	}
3009 
3010 	/* Release the global hardware lock */
3011 	ice_ptp_unlock(hw);
3012 
3013 	if (!ice_is_e810(hw)) {
3014 		/* Enable quad interrupts */
3015 		err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
3016 		if (err)
3017 			goto err_exit;
3018 	}
3019 
3020 	/* Ensure we have a clock device */
3021 	err = ice_ptp_create_clock(pf);
3022 	if (err)
3023 		goto err_clk;
3024 
3025 	err = ice_ptp_register_auxbus_driver(pf);
3026 	if (err) {
3027 		dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
3028 		goto err_aux;
3029 	}
3030 
3031 	return 0;
3032 err_aux:
3033 	ptp_clock_unregister(pf->ptp.clock);
3034 err_clk:
3035 	pf->ptp.clock = NULL;
3036 err_exit:
3037 	return err;
3038 }
3039 
3040 /**
3041  * ice_ptp_init_work - Initialize PTP work threads
3042  * @pf: Board private structure
3043  * @ptp: PF PTP structure
3044  */
3045 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
3046 {
3047 	struct kthread_worker *kworker;
3048 
3049 	/* Initialize work functions */
3050 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
3051 
3052 	/* Allocate a kworker for handling work required for the ports
3053 	 * connected to the PTP hardware clock.
3054 	 */
3055 	kworker = kthread_create_worker(0, "ice-ptp-%s",
3056 					dev_name(ice_pf_to_dev(pf)));
3057 	if (IS_ERR(kworker))
3058 		return PTR_ERR(kworker);
3059 
3060 	ptp->kworker = kworker;
3061 
3062 	/* Start periodic work going */
3063 	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
3064 
3065 	return 0;
3066 }
3067 
3068 /**
3069  * ice_ptp_init_port - Initialize PTP port structure
3070  * @pf: Board private structure
3071  * @ptp_port: PTP port structure
3072  */
3073 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
3074 {
3075 	struct ice_hw *hw = &pf->hw;
3076 
3077 	mutex_init(&ptp_port->ps_lock);
3078 
3079 	switch (hw->phy_model) {
3080 	case ICE_PHY_E810:
3081 		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
3082 	case ICE_PHY_E82X:
3083 		kthread_init_delayed_work(&ptp_port->ov_work,
3084 					  ice_ptp_wait_for_offsets);
3085 
3086 		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
3087 					    ptp_port->port_num);
3088 	default:
3089 		return -ENODEV;
3090 	}
3091 }
3092 
3093 /**
3094  * ice_ptp_release_auxbus_device - Release callback for the auxiliary bus device
3095  * @dev: device that utilizes the auxbus
3096  */
3097 static void ice_ptp_release_auxbus_device(struct device *dev)
3098 {
3099 	/* Doing nothing here, but handle to auxbus device must be satisfied */
3100 }
3101 
3102 /**
3103  * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
3104  * @pf: Board private structure
3105  */
3106 static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
3107 {
3108 	struct auxiliary_device *aux_dev;
3109 	struct ice_ptp *ptp;
3110 	struct device *dev;
3111 	char *name;
3112 	int err;
3113 	u32 id;
3114 
3115 	ptp = &pf->ptp;
3116 	id = ptp->port.port_num;
3117 	dev = ice_pf_to_dev(pf);
3118 
3119 	aux_dev = &ptp->port.aux_dev;
3120 
3121 	name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
3122 			      pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
3123 			      ice_get_ptp_src_clock_index(&pf->hw));
3124 	if (!name)
3125 		return -ENOMEM;
3126 
3127 	aux_dev->name = name;
3128 	aux_dev->id = id;
3129 	aux_dev->dev.release = ice_ptp_release_auxbus_device;
3130 	aux_dev->dev.parent = dev;
3131 
3132 	err = auxiliary_device_init(aux_dev);
3133 	if (err)
3134 		goto aux_err;
3135 
3136 	err = auxiliary_device_add(aux_dev);
3137 	if (err) {
3138 		auxiliary_device_uninit(aux_dev);
3139 		goto aux_err;
3140 	}
3141 
3142 	return 0;
3143 aux_err:
3144 	dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
3145 	devm_kfree(dev, name);
3146 	return err;
3147 }
3148 
3149 /**
3150  * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
3151  * @pf: Board private structure
3152  */
3153 static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
3154 {
3155 	struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
3156 
3157 	auxiliary_device_delete(aux_dev);
3158 	auxiliary_device_uninit(aux_dev);
3159 
3160 	memset(aux_dev, 0, sizeof(*aux_dev));
3161 }
3162 
3163 /**
3164  * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
3165  * @pf: Board private structure
3166  *
3167  * Initialize the Tx timestamp interrupt mode for this device. For most device
3168  * types, each PF processes the interrupt and manages its own timestamps. For
3169  * E822-based devices, only the clock owner processes the timestamps. Other
3170  * PFs disable the interrupt and do not process their own timestamps.
3171  */
3172 static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
3173 {
3174 	switch (pf->hw.phy_model) {
3175 	case ICE_PHY_E82X:
3176 		/* E822 based PHY has the clock owner process the interrupt
3177 		 * for all ports.
3178 		 */
3179 		if (ice_pf_src_tmr_owned(pf))
3180 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
3181 		else
3182 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
3183 		break;
3184 	default:
3185 		/* other PHY types handle their own Tx interrupt */
3186 		pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
3187 	}
3188 }
3189 
3190 /**
3191  * ice_ptp_init - Initialize PTP hardware clock support
3192  * @pf: Board private structure
3193  *
3194  * Set up the device for interacting with the PTP hardware clock for all
3195  * functions, both the function that owns the clock hardware, and the
3196  * functions connected to the clock hardware.
3197  *
3198  * The clock owner will allocate and register a ptp_clock with the
3199  * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
3200  * items used for asynchronous work such as Tx timestamps and periodic work.
3201  */
3202 void ice_ptp_init(struct ice_pf *pf)
3203 {
3204 	struct ice_ptp *ptp = &pf->ptp;
3205 	struct ice_hw *hw = &pf->hw;
3206 	int err;
3207 
3208 	ptp->state = ICE_PTP_INITIALIZING;
3209 
3210 	ice_ptp_init_phy_model(hw);
3211 
3212 	ice_ptp_init_tx_interrupt_mode(pf);
3213 
3214 	/* If this function owns the clock hardware, it must allocate and
3215 	 * configure the PTP clock device to represent it.
3216 	 */
3217 	if (ice_pf_src_tmr_owned(pf)) {
3218 		err = ice_ptp_init_owner(pf);
3219 		if (err)
3220 			goto err;
3221 	}
3222 
3223 	ptp->port.port_num = hw->pf_id;
3224 	err = ice_ptp_init_port(pf, &ptp->port);
3225 	if (err)
3226 		goto err;
3227 
3228 	/* Start the PHY timestamping block */
3229 	ice_ptp_reset_phy_timestamping(pf);
3230 
3231 	/* Configure initial Tx interrupt settings */
3232 	ice_ptp_cfg_tx_interrupt(pf);
3233 
3234 	err = ice_ptp_create_auxbus_device(pf);
3235 	if (err)
3236 		goto err;
3237 
3238 	ptp->state = ICE_PTP_READY;
3239 
3240 	err = ice_ptp_init_work(pf, ptp);
3241 	if (err)
3242 		goto err;
3243 
3244 	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
3245 	return;
3246 
3247 err:
3248 	/* If we registered a PTP clock, release it */
3249 	if (pf->ptp.clock) {
3250 		ptp_clock_unregister(ptp->clock);
3251 		pf->ptp.clock = NULL;
3252 	}
3253 	ptp->state = ICE_PTP_ERROR;
3254 	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
3255 }
3256 
3257 /**
3258  * ice_ptp_release - Disable the driver/HW support and unregister the clock
3259  * @pf: Board private structure
3260  *
3261  * This function handles the cleanup work required from the initialization by
3262  * clearing out the important information and unregistering the clock
3263  */
3264 void ice_ptp_release(struct ice_pf *pf)
3265 {
3266 	if (pf->ptp.state != ICE_PTP_READY)
3267 		return;
3268 
3269 	pf->ptp.state = ICE_PTP_UNINIT;
3270 
3271 	/* Disable timestamping for both Tx and Rx */
3272 	ice_ptp_disable_timestamp_mode(pf);
3273 
3274 	ice_ptp_remove_auxbus_device(pf);
3275 
3276 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
3277 
3278 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
3279 
3280 	ice_ptp_port_phy_stop(&pf->ptp.port);
3281 	mutex_destroy(&pf->ptp.port.ps_lock);
3282 	if (pf->ptp.kworker) {
3283 		kthread_destroy_worker(pf->ptp.kworker);
3284 		pf->ptp.kworker = NULL;
3285 	}
3286 
3287 	if (ice_pf_src_tmr_owned(pf))
3288 		ice_ptp_unregister_auxbus_driver(pf);
3289 
3290 	if (!pf->ptp.clock)
3291 		return;
3292 
3293 	/* Disable periodic outputs */
3294 	ice_ptp_disable_all_clkout(pf);
3295 
3296 	ptp_clock_unregister(pf->ptp.clock);
3297 	pf->ptp.clock = NULL;
3298 
3299 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
3300 }
3301