xref: /linux/drivers/net/can/peak_canfd/peak_canfd.c (revision c6fbb759)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
3  * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
4  *
5  * Copyright (C) 2016  PEAK System-Technik GmbH
6  */
7 
8 #include <linux/can.h>
9 #include <linux/can/dev.h>
10 #include <linux/ethtool.h>
11 
12 #include "peak_canfd_user.h"
13 
14 /* internal IP core cache size (used as default echo skbs max number) */
15 #define PCANFD_ECHO_SKB_MAX		24
16 
17 /* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
18 static const struct can_bittiming_const peak_canfd_nominal_const = {
19 	.name = "peak_canfd",
20 	.tseg1_min = 1,
21 	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
22 	.tseg2_min = 1,
23 	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
24 	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
25 	.brp_min = 1,
26 	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
27 	.brp_inc = 1,
28 };
29 
30 static const struct can_bittiming_const peak_canfd_data_const = {
31 	.name = "peak_canfd",
32 	.tseg1_min = 1,
33 	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
34 	.tseg2_min = 1,
35 	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
36 	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
37 	.brp_min = 1,
38 	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
39 	.brp_inc = 1,
40 };
41 
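/* Channel commands are built in the per-channel command buffer:
 * pucan_init_cmd() resets the buffer, pucan_add_cmd() appends one
 * zero-initialized command tagged with the channel index, and
 * pucan_write_cmd() pushes the whole buffer to the hardware through the
 * driver-provided pre_cmd()/write_cmd()/post_cmd() hooks.
 */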
42 static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
43 {
44 	priv->cmd_len = 0;
45 	return priv;
46 }
47 
48 static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
49 {
50 	struct pucan_command *cmd;
51 
52 	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
53 		return NULL;
54 
55 	cmd = priv->cmd_buffer + priv->cmd_len;
56 
57 	/* reset all unused bits to their default value */
58 	memset(cmd, 0, sizeof(*cmd));
59 
60 	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
61 	priv->cmd_len += sizeof(*cmd);
62 
63 	return cmd;
64 }
65 
66 static int pucan_write_cmd(struct peak_canfd_priv *priv)
67 {
68 	int err;
69 
70 	if (priv->pre_cmd) {
71 		err = priv->pre_cmd(priv);
72 		if (err)
73 			return err;
74 	}
75 
76 	err = priv->write_cmd(priv);
77 	if (err)
78 		return err;
79 
80 	if (priv->post_cmd)
81 		err = priv->post_cmd(priv);
82 
83 	return err;
84 }
85 
86 /* uCAN commands interface functions */
87 static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
88 {
89 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
90 	return pucan_write_cmd(priv);
91 }
92 
93 static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
94 {
95 	int err;
96 
97 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
98 	err = pucan_write_cmd(priv);
99 	if (!err)
100 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
101 
102 	return err;
103 }
104 
105 static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
106 {
107 	int err;
108 
109 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
110 	err = pucan_write_cmd(priv);
111 	if (!err)
112 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
113 
114 	return err;
115 }
116 
117 static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
118 				 const struct can_bittiming *pbt)
119 {
120 	struct pucan_timing_slow *cmd;
121 
122 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
123 
124 	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
125 				       priv->can.ctrlmode &
126 				       CAN_CTRLMODE_3_SAMPLES);
127 	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
128 	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
129 	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
130 
131 	cmd->ewl = 96;	/* default error warning limit */
132 
133 	netdev_dbg(priv->ndev,
134 		   "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
135 		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
136 
137 	return pucan_write_cmd(priv);
138 }
139 
140 static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
141 				 const struct can_bittiming *pbt)
142 {
143 	struct pucan_timing_fast *cmd;
144 
145 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);
146 
147 	cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
148 	cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
149 	cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
150 	cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));
151 
152 	netdev_dbg(priv->ndev,
153 		   "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
154 		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);
155 
156 	return pucan_write_cmd(priv);
157 }
158 
159 static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
160 {
161 	struct pucan_std_filter *cmd;
162 
163 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
164 
165 	/* each 11-bit CAN ID value is represented by one bit in a 64-row
166 	 * array of 32-bit words: the upper 6 bits of the CAN ID select the
167 	 * row while the lowest 5 bits select the bit within that row.
168 	 *
169 	 * bit	filter
170 	 * 1	passed
171 	 * 0	discarded
172 	 */
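	/* e.g. CAN ID 0x123: row = 0x123 >> 5 = 9, bit = 0x123 & 0x1f = 3 */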
173 
174 	/* select the row */
175 	cmd->idx = row;
176 
177 	/* set/unset bits in the row */
178 	cmd->mask = cpu_to_le32(mask);
179 
180 	return pucan_write_cmd(priv);
181 }
182 
183 static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
184 {
185 	struct pucan_tx_abort *cmd;
186 
187 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
188 
189 	cmd->flags = cpu_to_le16(flags);
190 
191 	return pucan_write_cmd(priv);
192 }
193 
194 static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
195 {
196 	struct pucan_wr_err_cnt *cmd;
197 
198 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
199 
200 	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
201 	cmd->tx_counter = 0;
202 	cmd->rx_counter = 0;
203 
204 	return pucan_write_cmd(priv);
205 }
206 
207 static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
208 {
209 	struct pucan_options *cmd;
210 
211 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
212 
213 	cmd->options = cpu_to_le16(opt_mask);
214 
215 	return pucan_write_cmd(priv);
216 }
217 
218 static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
219 {
220 	struct pucan_options *cmd;
221 
222 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);
223 
224 	cmd->options = cpu_to_le16(opt_mask);
225 
226 	return pucan_write_cmd(priv);
227 }
228 
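/* post a RX_BARRIER command: the IP core answers with a STATUS message once
 * the barrier has been processed, which is then used to enable the Tx path
 * (see pucan_handle_status())
 */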
229 static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
230 {
231 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
232 
233 	return pucan_write_cmd(priv);
234 }
235 
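/* push one skb up to the network stack, converting the two 32-bit µs words
 * provided by the IP core into the skb hardware timestamp
 */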
236 static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
237 {
238 	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
239 	u64 ts_us;
240 
241 	ts_us = (u64)le32_to_cpu(ts_high) << 32;
242 	ts_us |= le32_to_cpu(ts_low);
243 
244 	/* IP core timestamps are expressed in µs */
245 	hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
246 
247 	return netif_rx(skb);
248 }
249 
250 /* handle the reception of one CAN frame */
251 static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
252 			       struct pucan_rx_msg *msg)
253 {
254 	struct net_device_stats *stats = &priv->ndev->stats;
255 	struct canfd_frame *cf;
256 	struct sk_buff *skb;
257 	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
258 	u8 cf_len;
259 
260 	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
261 		cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg));
262 	else
263 		cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg));
264 
265 	/* if this frame is an echoed copy of a frame sent by this driver, */
266 	if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
267 		unsigned long flags;
268 
269 		spin_lock_irqsave(&priv->echo_lock, flags);
270 
271 		/* count the bytes of the echoed frame instead of the tx skb */
272 		stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
273 		stats->tx_packets++;
274 
275 		/* restart tx queue (a slot is free) */
276 		netif_wake_queue(priv->ndev);
277 
278 		spin_unlock_irqrestore(&priv->echo_lock, flags);
279 
280 		/* if this frame is only an echo, stop here. Otherwise,
281 		 * continue to push this application self-received frame into
282 		 * its own rx queue.
283 		 */
284 		if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
285 			return 0;
286 	}
287 
288 	/* otherwise, it should be pushed into rx fifo */
289 	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
290 		/* CANFD frame case */
291 		skb = alloc_canfd_skb(priv->ndev, &cf);
292 		if (!skb)
293 			return -ENOMEM;
294 
295 		if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
296 			cf->flags |= CANFD_BRS;
297 
298 		if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
299 			cf->flags |= CANFD_ESI;
300 	} else {
301 		/* CAN 2.0 frame case */
302 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
303 		if (!skb)
304 			return -ENOMEM;
305 	}
306 
307 	cf->can_id = le32_to_cpu(msg->can_id);
308 	cf->len = cf_len;
309 
310 	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
311 		cf->can_id |= CAN_EFF_FLAG;
312 
313 	if (rx_msg_flags & PUCAN_MSG_RTR) {
314 		cf->can_id |= CAN_RTR_FLAG;
315 	} else {
316 		memcpy(cf->data, msg->d, cf->len);
317 
318 		stats->rx_bytes += cf->len;
319 	}
320 	stats->rx_packets++;
321 
322 	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
323 
324 	return 0;
325 }
326 
327 /* handle rx/tx error counters notification */
328 static int pucan_handle_error(struct peak_canfd_priv *priv,
329 			      struct pucan_error_msg *msg)
330 {
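	/* only cache the counters here: they are reported to user space by
	 * pucan_handle_status() and peak_canfd_get_berr_counter()
	 */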
331 	priv->bec.txerr = msg->tx_err_cnt;
332 	priv->bec.rxerr = msg->rx_err_cnt;
333 
334 	return 0;
335 }
336 
337 /* handle status notification */
338 static int pucan_handle_status(struct peak_canfd_priv *priv,
339 			       struct pucan_status_msg *msg)
340 {
341 	struct net_device *ndev = priv->ndev;
342 	struct net_device_stats *stats = &ndev->stats;
343 	struct can_frame *cf;
344 	struct sk_buff *skb;
345 
346 	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be set up */
347 	if (pucan_status_is_rx_barrier(msg)) {
348 		if (priv->enable_tx_path) {
349 			int err = priv->enable_tx_path(priv);
350 
351 			if (err)
352 				return err;
353 		}
354 
355 		/* wake network queue up (echo_skb array is empty) */
356 		netif_wake_queue(ndev);
357 
358 		return 0;
359 	}
360 
361 	skb = alloc_can_err_skb(ndev, &cf);
362 
363 	/* test state error bits according to their priority */
364 	if (pucan_status_is_busoff(msg)) {
365 		netdev_dbg(ndev, "Bus-off entry status\n");
366 		priv->can.state = CAN_STATE_BUS_OFF;
367 		priv->can.can_stats.bus_off++;
368 		can_bus_off(ndev);
369 		if (skb)
370 			cf->can_id |= CAN_ERR_BUSOFF;
371 
372 	} else if (pucan_status_is_passive(msg)) {
373 		netdev_dbg(ndev, "Error passive status\n");
374 		priv->can.state = CAN_STATE_ERROR_PASSIVE;
375 		priv->can.can_stats.error_passive++;
376 		if (skb) {
377 			cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
378 			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
379 					CAN_ERR_CRTL_TX_PASSIVE :
380 					CAN_ERR_CRTL_RX_PASSIVE;
381 			cf->data[6] = priv->bec.txerr;
382 			cf->data[7] = priv->bec.rxerr;
383 		}
384 
385 	} else if (pucan_status_is_warning(msg)) {
386 		netdev_dbg(ndev, "Error warning status\n");
387 		priv->can.state = CAN_STATE_ERROR_WARNING;
388 		priv->can.can_stats.error_warning++;
389 		if (skb) {
390 			cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
391 			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
392 					CAN_ERR_CRTL_TX_WARNING :
393 					CAN_ERR_CRTL_RX_WARNING;
394 			cf->data[6] = priv->bec.txerr;
395 			cf->data[7] = priv->bec.rxerr;
396 		}
397 
398 	} else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
399 		/* back to ERROR_ACTIVE */
400 		netdev_dbg(ndev, "Error active status\n");
401 		can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
402 				 CAN_STATE_ERROR_ACTIVE);
403 	} else {
404 		dev_kfree_skb(skb);
405 		return 0;
406 	}
407 
408 	if (!skb) {
409 		stats->rx_dropped++;
410 		return -ENOMEM;
411 	}
412 
413 	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
414 
415 	return 0;
416 }
417 
418 /* handle uCAN Rx overflow notification */
419 static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
420 {
421 	struct net_device_stats *stats = &priv->ndev->stats;
422 	struct can_frame *cf;
423 	struct sk_buff *skb;
424 
425 	stats->rx_over_errors++;
426 	stats->rx_errors++;
427 
428 	skb = alloc_can_err_skb(priv->ndev, &cf);
429 	if (!skb) {
430 		stats->rx_dropped++;
431 		return -ENOMEM;
432 	}
433 
434 	cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
435 	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
436 
437 	cf->data[6] = priv->bec.txerr;
438 	cf->data[7] = priv->bec.rxerr;
439 
440 	netif_rx(skb);
441 
442 	return 0;
443 }
444 
445 /* handle a single uCAN message */
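/* Returns the size (in bytes) of the handled message so that the caller can
 * step to the next message in the list, 0 when a null message ends the list,
 * or a negative error code.
 */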
446 int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
447 			  struct pucan_rx_msg *msg)
448 {
449 	u16 msg_type = le16_to_cpu(msg->type);
450 	int msg_size = le16_to_cpu(msg->size);
451 	int err;
452 
453 	if (!msg_size || !msg_type) {
454 		/* null packet found: end of list */
455 		goto exit;
456 	}
457 
458 	switch (msg_type) {
459 	case PUCAN_MSG_CAN_RX:
460 		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
461 		break;
462 	case PUCAN_MSG_ERROR:
463 		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
464 		break;
465 	case PUCAN_MSG_STATUS:
466 		err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
467 		break;
468 	case PUCAN_MSG_CACHE_CRITICAL:
469 		err = pucan_handle_cache_critical(priv);
470 		break;
471 	default:
472 		err = 0;
473 	}
474 
475 	if (err < 0)
476 		return err;
477 
478 exit:
479 	return msg_size;
480 }
481 
482 /* handle a list of msg_count messages starting at the msg_list address */
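/* Returns the number of messages handled, or a negative error code. */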
483 int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
484 				struct pucan_rx_msg *msg_list, int msg_count)
485 {
486 	void *msg_ptr = msg_list;
487 	int i, msg_size = 0;
488 
489 	for (i = 0; i < msg_count; i++) {
490 		msg_size = peak_canfd_handle_msg(priv, msg_ptr);
491 
492 		/* a null packet can be found at the end of a list */
493 		if (msg_size <= 0)
494 			break;
495 
496 		msg_ptr += ALIGN(msg_size, 4);
497 	}
498 
499 	if (msg_size < 0)
500 		return msg_size;
501 
502 	return i;
503 }
504 
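/* clear the hardware error counters, reset the driver echo and error state,
 * then switch the channel to listen-only or normal mode
 */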
505 static int peak_canfd_start(struct peak_canfd_priv *priv)
506 {
507 	int err;
508 
509 	err = pucan_clr_err_counters(priv);
510 	if (err)
511 		goto err_exit;
512 
513 	priv->echo_idx = 0;
514 
515 	priv->bec.txerr = 0;
516 	priv->bec.rxerr = 0;
517 
518 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
519 		err = pucan_set_listen_only_mode(priv);
520 	else
521 		err = pucan_set_normal_mode(priv);
522 
523 err_exit:
524 	return err;
525 }
526 
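/* put the channel back into RESET mode and flush any pending Tx frames */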
527 static void peak_canfd_stop(struct peak_canfd_priv *priv)
528 {
529 	int err;
530 
531 	/* go back to RESET mode */
532 	err = pucan_set_reset_mode(priv);
533 	if (err) {
534 		netdev_err(priv->ndev, "channel %u reset failed\n",
535 			   priv->index);
536 	} else {
537 		/* abort last Tx (MUST be done in RESET mode only!) */
538 		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
539 	}
540 }
541 
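/* candev do_set_mode() callback, used in particular to restart the channel
 * after a bus-off condition
 */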
542 static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
543 {
544 	struct peak_canfd_priv *priv = netdev_priv(ndev);
545 
546 	switch (mode) {
547 	case CAN_MODE_START:
548 		peak_canfd_start(priv);
549 		netif_wake_queue(ndev);
550 		break;
551 	default:
552 		return -EOPNOTSUPP;
553 	}
554 
555 	return 0;
556 }
557 
558 static int peak_canfd_get_berr_counter(const struct net_device *ndev,
559 				       struct can_berr_counter *bec)
560 {
561 	struct peak_canfd_priv *priv = netdev_priv(ndev);
562 
563 	*bec = priv->bec;
564 	return 0;
565 }
566 
567 static int peak_canfd_open(struct net_device *ndev)
568 {
569 	struct peak_canfd_priv *priv = netdev_priv(ndev);
570 	int i, err = 0;
571 
572 	err = open_candev(ndev);
573 	if (err) {
574 		netdev_err(ndev, "open_candev() failed, error %d\n", err);
575 		goto err_exit;
576 	}
577 
578 	err = pucan_set_reset_mode(priv);
579 	if (err)
580 		goto err_close;
581 
582 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
583 		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
584 			err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
585 		else
586 			err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);
587 
588 		if (err)
589 			goto err_close;
590 	}
591 
592 	/* set option: get rx/tx error counters */
593 	err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
594 	if (err)
595 		goto err_close;
596 
597 	/* accept all standard CAN IDs */
598 	for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
599 		pucan_set_std_filter(priv, i, 0xffffffff);
600 
601 	err = peak_canfd_start(priv);
602 	if (err)
603 		goto err_close;
604 
605 	/* receiving the RX_BARRIER status tells when the Tx path is ready */
606 	err = pucan_setup_rx_barrier(priv);
607 	if (!err)
608 		goto err_exit;
609 
610 err_close:
611 	close_candev(ndev);
612 err_exit:
613 	return err;
614 }
615 
616 static int peak_canfd_set_bittiming(struct net_device *ndev)
617 {
618 	struct peak_canfd_priv *priv = netdev_priv(ndev);
619 
620 	return pucan_set_timing_slow(priv, &priv->can.bittiming);
621 }
622 
623 static int peak_canfd_set_data_bittiming(struct net_device *ndev)
624 {
625 	struct peak_canfd_priv *priv = netdev_priv(ndev);
626 
627 	return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
628 }
629 
630 static int peak_canfd_close(struct net_device *ndev)
631 {
632 	struct peak_canfd_priv *priv = netdev_priv(ndev);
633 
634 	netif_stop_queue(ndev);
635 	peak_canfd_stop(priv);
636 	close_candev(ndev);
637 
638 	return 0;
639 }
640 
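/* netdev start_xmit() callback: build one PUCAN_MSG_CAN_TX message in the Tx
 * area provided by the glue driver, save the echo skb, then let the glue
 * driver write the message to the hardware
 */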
641 static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
642 					 struct net_device *ndev)
643 {
644 	struct peak_canfd_priv *priv = netdev_priv(ndev);
645 	struct net_device_stats *stats = &ndev->stats;
646 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
647 	struct pucan_tx_msg *msg;
648 	u16 msg_size, msg_flags;
649 	unsigned long flags;
650 	bool should_stop_tx_queue;
651 	int room_left;
652 	u8 len;
653 
654 	if (can_dropped_invalid_skb(ndev, skb))
655 		return NETDEV_TX_OK;
656 
657 	msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
658 	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);
659 
660 	/* should never happen, except under a bus-off condition while the
661 	 * (auto-)restart mechanism is running
662 	 */
663 	if (!msg) {
664 		stats->tx_dropped++;
665 		netif_stop_queue(ndev);
666 		return NETDEV_TX_BUSY;
667 	}
668 
669 	msg->size = cpu_to_le16(msg_size);
670 	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
671 	msg_flags = 0;
672 
673 	if (cf->can_id & CAN_EFF_FLAG) {
674 		msg_flags |= PUCAN_MSG_EXT_ID;
675 		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
676 	} else {
677 		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
678 	}
679 
680 	if (can_is_canfd_skb(skb)) {
681 		/* CAN FD frame format */
682 		len = can_fd_len2dlc(cf->len);
683 
684 		msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
685 
686 		if (cf->flags & CANFD_BRS)
687 			msg_flags |= PUCAN_MSG_BITRATE_SWITCH;
688 
689 		if (cf->flags & CANFD_ESI)
690 			msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
691 	} else {
692 		/* CAN 2.0 frame format */
693 		len = cf->len;
694 
695 		if (cf->can_id & CAN_RTR_FLAG)
696 			msg_flags |= PUCAN_MSG_RTR;
697 	}
698 
699 	/* always request loopback: the echoed frame is used to complete the tx */
700 	msg_flags |= PUCAN_MSG_LOOPED_BACK;
701 
702 	/* set the driver-specific bit to differentiate from application loopback */
703 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
704 		msg_flags |= PUCAN_MSG_SELF_RECEIVE;
705 
706 	msg->flags = cpu_to_le16(msg_flags);
707 	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len);
708 	memcpy(msg->d, cf->data, cf->len);
709 
710 	/* the msg client field is used as an index into the echo skbs ring */
711 	msg->client = priv->echo_idx;
712 
713 	spin_lock_irqsave(&priv->echo_lock, flags);
714 
715 	/* prepare and save echo skb in internal slot */
716 	can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
717 
718 	/* move echo index to the next slot */
719 	priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
720 
721 	/* if the next slot is not free, stop the network queue: no free slot
722 	 * in the echo skb ring means that the controller has not yet written
723 	 * these frames onto the bus, so there is no point queuing more.
724 	 */
725 	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
726 
727 	/* also stop the tx queue if there is no room left for one more msg */
728 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
729 		should_stop_tx_queue |= (room_left <
730 					(sizeof(*msg) + CANFD_MAX_DLEN));
731 	else
732 		should_stop_tx_queue |= (room_left <
733 					(sizeof(*msg) + CAN_MAX_DLEN));
734 
735 	if (should_stop_tx_queue)
736 		netif_stop_queue(ndev);
737 
738 	spin_unlock_irqrestore(&priv->echo_lock, flags);
739 
740 	/* write the message to the interface */
741 	priv->write_tx_msg(priv, msg);
742 
743 	return NETDEV_TX_OK;
744 }
745 
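/* hardware rx timestamping is always on and tx timestamping is not
 * available, so only that single hwtstamp configuration is accepted/reported
 */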
746 static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
747 {
748 	struct hwtstamp_config hwts_cfg = { 0 };
749 
750 	switch (cmd) {
751 	case SIOCSHWTSTAMP: /* set */
752 		if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
753 			return -EFAULT;
754 		if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
755 		    hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
756 			return 0;
757 		return -ERANGE;
758 
759 	case SIOCGHWTSTAMP: /* get */
760 		hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
761 		hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
762 		if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
763 			return -EFAULT;
764 		return 0;
765 
766 	default:
767 		return -EOPNOTSUPP;
768 	}
769 }
770 
771 static const struct net_device_ops peak_canfd_netdev_ops = {
772 	.ndo_open = peak_canfd_open,
773 	.ndo_stop = peak_canfd_close,
774 	.ndo_eth_ioctl = peak_eth_ioctl,
775 	.ndo_start_xmit = peak_canfd_start_xmit,
776 	.ndo_change_mtu = can_change_mtu,
777 };
778 
779 static int peak_get_ts_info(struct net_device *dev,
780 			    struct ethtool_ts_info *info)
781 {
782 	info->so_timestamping =
783 		SOF_TIMESTAMPING_TX_SOFTWARE |
784 		SOF_TIMESTAMPING_RX_SOFTWARE |
785 		SOF_TIMESTAMPING_SOFTWARE |
786 		SOF_TIMESTAMPING_RX_HARDWARE |
787 		SOF_TIMESTAMPING_RAW_HARDWARE;
788 	info->phc_index = -1;
789 	info->tx_types = BIT(HWTSTAMP_TX_OFF);
790 	info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
791 
792 	return 0;
793 }
794 
795 static const struct ethtool_ops peak_canfd_ethtool_ops = {
796 	.get_ts_info = peak_get_ts_info,
797 };
798 
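/* allocate and initialize the CAN network device of one uCAN channel.
 * sizeof_priv is the size of the caller's private structure, which is
 * expected to start with a struct peak_canfd_priv; a negative echo_skb_max
 * selects the default PCANFD_ECHO_SKB_MAX.
 */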
799 struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
800 					int echo_skb_max)
801 {
802 	struct net_device *ndev;
803 	struct peak_canfd_priv *priv;
804 
805 	/* we DO support local echo */
806 	if (echo_skb_max < 0)
807 		echo_skb_max = PCANFD_ECHO_SKB_MAX;
808 
809 	/* allocate the candev object */
810 	ndev = alloc_candev(sizeof_priv, echo_skb_max);
811 	if (!ndev)
812 		return NULL;
813 
814 	priv = netdev_priv(ndev);
815 
816 	/* now complete the SocketCAN side of the initialization */
817 	priv->can.state = CAN_STATE_STOPPED;
818 	priv->can.bittiming_const = &peak_canfd_nominal_const;
819 	priv->can.data_bittiming_const = &peak_canfd_data_const;
820 
821 	priv->can.do_set_mode = peak_canfd_set_mode;
822 	priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
823 	priv->can.do_set_bittiming = peak_canfd_set_bittiming;
824 	priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
825 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
826 				       CAN_CTRLMODE_LISTENONLY |
827 				       CAN_CTRLMODE_3_SAMPLES |
828 				       CAN_CTRLMODE_FD |
829 				       CAN_CTRLMODE_FD_NON_ISO |
830 				       CAN_CTRLMODE_BERR_REPORTING;
831 
832 	priv->ndev = ndev;
833 	priv->index = index;
834 	priv->cmd_len = 0;
835 	spin_lock_init(&priv->echo_lock);
836 
837 	ndev->flags |= IFF_ECHO;
838 	ndev->netdev_ops = &peak_canfd_netdev_ops;
839 	ndev->ethtool_ops = &peak_canfd_ethtool_ops;
840 	ndev->dev_id = index;
841 
842 	return ndev;
843 }
844