/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

/* Number of 16 ns time quanta to wait before making a drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT	0
/* Nanoseconds to add/subtract when making a drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT		28
/* Whether to add or subtract the adjustment value in a drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
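/* Layout of the drift counter configuration word implied by the shifts above
 * (a descriptive note inferred from this file only, not from hardware docs):
 *	bits [27:0]  - drift period, in 16 ns time quanta
 *	bits [30:28] - adjustment value, in nanoseconds (0-7)
 *	bit  31      - adjustment direction (add/subtract)
 */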
#define QED_TIMESTAMP_MASK			BIT(16)
/* Param mask used by the hardware to detect/timestamp unicast PTP packets */
#define QED_PTP_UCAST_PARAM_MASK		0xF

static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (MFW_PORT(p_hwfn)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		return QED_RESC_LOCK_RESC_INVALID;
	}
}

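/* The PTP resource lock is per port: it is acquired in qed_ptp_hw_enable()
 * and released in qed_ptp_hw_disable(), so that only one PF per port
 * configures the port's PTP registers at a time.
 */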
static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* MFW doesn't support resource locking; the first PF on the
		 * port has lock ownership.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
			return 0;

		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
		return -EBUSY;
	} else if (!rc && !params.b_granted) {
		DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
		return -EBUSY;
	}

	return rc;
}

static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
	if (rc == -EINVAL) {
		/* MFW doesn't support locking; the first PF has lock ownership */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
			rc = 0;
		} else {
			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
			return -EINVAL;
		}
	} else if (rc) {
		DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
	}

	return rc;
}

/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
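	/* The QED_TIMESTAMP_MASK bit in the buffer SEQID register indicates
	 * whether a valid Rx timestamp has been captured by the hardware.
	 */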
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Tx timestamp */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Invalid Tx timestamp, buf_seqid = %08x\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Read the PTP Hardware Clock (PHC) */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 temp = 0;

	temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
	*phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
	*phc_cycles <<= 32;
	*phc_cycles |= temp;

	return 0;
}

/* Filter PTP protocol packets that need to be timestamped */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

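	/* Map the requested Rx filter type to the PTP detection enable bits
	 * and the LLH rule mask that are programmed into the NIG below; the
	 * numeric values are device-specific encodings.
	 */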
	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
	       QED_PTP_UCAST_PARAM_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
		       QED_PTP_UCAST_PARAM_MASK);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment in terms of three parameters:
 *   Drift period - the adjustment happens once every given number of
 *                  nanoseconds.
 *   Drift value - the time is adjusted by a certain value, for example 5 ns.
 *   Drift direction - whether to add or subtract the adjustment value.
 * The routine translates ppb into this adjustment triplet in an optimal
 * manner.
 */
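/* Worked example (illustrative): for ppb = 100 the search below arrives at an
 * adjustment value of 7 ns and a drift period of 4,375,000 time quanta of
 * 16 ns each, i.e. 7 ns are added roughly every 70 ms, which corresponds to
 * 100 ppb. The configuration word programmed further down is then
 * (4375000 << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
 * (7 << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
 * (1 << QED_DRIFT_CNTR_DIRECTION_SHIFT).
 */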
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* The adjustment value is limited to +/- 7 ns; find the
		 * optimal value in this range.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Check both rounding ends for approximate error */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Determine which end gives better approximation */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Track best approximation found so far */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/* This is a special case, as it's the only value which
		 * wouldn't fit in an s64 variable. In order to avoid casts,
		 * simply handle it separately.
		 */
		best_val = 4;
		best_period = 0xee6b27f;
	} else {
		best_val = 0;
		best_period = 0xFFFFFFF;
	}

	drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
			(((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
			(((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

	drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
	if (drift_state & 1) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
		       drift_ctr_cfg);
	} else {
		DP_INFO(p_hwfn, "Drift counter is not reset\n");
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	return 0;
}

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}

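/* Illustrative usage sketch (not part of this driver): a higher-level driver
 * holding a pointer to these ops could read the free-running clock roughly as
 * follows, where "ptp_ops", "cdev" and "use_cycles" are hypothetical names:
 *
 *	u64 cycles;
 *
 *	if (!ptp_ops->read_cc(cdev, &cycles))
 *		use_cycles(cycles);
 */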
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};