// SPDX-License-Identifier: GPL-2.0
/*
 * net.c - Networking component for Mostcore
 *
 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/most.h>

#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)

#define PMHL 5

#define PMS_TELID_UNSEGM_MAMAC	0x0A
#define PMS_FIFONO_MDP		0x01
#define PMS_FIFONO_MEP		0x04
#define PMS_MSGTYPE_DATA	0x04
#define PMS_DEF_PRIO		0
#define MEP_DEF_RETRY		15

#define PMS_FIFONO_MASK		0x07
#define PMS_FIFONO_SHIFT	3
#define PMS_RETRY_SHIFT		4
#define PMS_TELID_MASK		0x0F
#define PMS_TELID_SHIFT		4

#define HB(value)		((u8)((u16)(value) >> 8))
#define LB(value)		((u8)(value))

#define EXTRACT_BIT_SET(bitset_name, value) \
	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)

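/*
 * Helpers that inspect the PMS header of a received buffer to decide
 * whether it carries a MEP (Ethernet) packet or an unsegmented MAMAC
 * (MDP) packet.
 */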
#define PMS_IS_MEP(buf, len) \
	((len) > MEP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)

static inline bool pms_is_mamac(char *buf, u32 len)
{
	return (len > MDP_HDR_LEN &&
		EXTRACT_BIT_SET(PMS_FIFONO, buf[3]) == PMS_FIFONO_MDP &&
		EXTRACT_BIT_SET(PMS_TELID, buf[14]) == PMS_TELID_UNSEGM_MAMAC);
}

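/*
 * Per-interface state: one net_device per MOST interface, with one RX and
 * one TX channel linked to it.
 */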
struct net_dev_channel {
	bool linked;
	int ch_id;
};

struct net_dev_context {
	struct most_interface *iface;
	bool is_mamac;
	struct net_device *dev;
	struct net_dev_channel rx;
	struct net_dev_channel tx;
	struct list_head list;
};

static LIST_HEAD(net_devices);
static DEFINE_MUTEX(probe_disc_mt); /* ch->linked = true, most_nd_open */
static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
static struct most_component comp;

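/*
 * Convert an outgoing Ethernet frame into an MDP (MAMAC) packet inside the
 * MBO buffer: build the port message header from the low two bytes of the
 * destination address and the EtherType, then append the payload (the frame
 * without its Ethernet header).
 */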
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	static const u8 broadcast[] = { 0x03, 0xFF };
	const u8 *dest_addr = skb->data + 4;
	const u8 *eth_type = skb->data + 12;
	unsigned int payload_len = skb->len - ETH_HLEN;
	unsigned int mdp_len = payload_len + MDP_HDR_LEN;

	if (mdp_len < skb->len) {
		pr_err("drop: too large packet! (%u)\n", skb->len);
		return -EINVAL;
	}

	if (mbo->buffer_length < mdp_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mdp_len);
		return -EINVAL;
	}

	if (skb->len < ETH_HLEN) {
		pr_err("drop: too small packet! (%d)\n", skb->len);
		return -EINVAL;
	}

	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
		dest_addr = broadcast;

	*buff++ = HB(mdp_len - 2);
	*buff++ = LB(mdp_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = PMS_DEF_PRIO;
	*buff++ = dest_addr[0];
	*buff++ = dest_addr[1];
	*buff++ = 0x00;

	*buff++ = HB(payload_len + 6);
	*buff++ = LB(payload_len + 6);

	/* end of FPH here */

	*buff++ = eth_type[0];
	*buff++ = eth_type[1];
	*buff++ = 0;
	*buff++ = 0;

	*buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
	*buff++ = LB(payload_len);

	memcpy(buff, skb->data + ETH_HLEN, payload_len);
	mbo->buffer_length = mdp_len;
	return 0;
}

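/*
 * Wrap an outgoing Ethernet frame in a MEP header inside the MBO buffer;
 * the whole frame, including its Ethernet header, is carried as payload.
 */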
static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	unsigned int mep_len = skb->len + MEP_HDR_LEN;

	if (mep_len < skb->len) {
		pr_err("drop: too large packet! (%u)\n", skb->len);
		return -EINVAL;
	}

	if (mbo->buffer_length < mep_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mep_len);
		return -EINVAL;
	}

	*buff++ = HB(mep_len - 2);
	*buff++ = LB(mep_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
	*buff++ = 0;
	*buff++ = 0;
	*buff++ = 0;

	memcpy(buff, skb->data, skb->len);
	mbo->buffer_length = mep_len;
	return 0;
}

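/*
 * A MAC address whose first four bytes are zero selects MAMAC (MDP)
 * framing; any other address selects MEP framing. The default MTU is set
 * to match the chosen packet type.
 */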
static int most_nd_set_mac_address(struct net_device *dev, void *p)
{
	struct net_dev_context *nd = netdev_priv(dev);
	int err = eth_mac_addr(dev, p);

	if (err)
		return err;

	nd->is_mamac =
		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);

	/*
	 * Set default MTU for the given packet type.
	 * It is still possible to change MTU using ip tools afterwards.
	 */
	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;

	return 0;
}

static void on_netinfo(struct most_interface *iface,
		       unsigned char link_stat, unsigned char *mac_addr);

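/*
 * Start both MOST channels, bring the queue up and ask the HDM for the
 * current link state and MAC address. The device stays dormant until a
 * valid MAC address is known.
 */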
static int most_nd_open(struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&probe_disc_mt);

	if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		ret = -EBUSY;
		goto unlock;
	}

	if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
		ret = -EBUSY;
		goto unlock;
	}

	netif_carrier_off(dev);
	if (is_valid_ether_addr(dev->dev_addr))
		netif_dormant_off(dev);
	else
		netif_dormant_on(dev);
	netif_wake_queue(dev);
	if (nd->iface->request_netinfo)
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

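/* Stop the queue, cancel netinfo updates and stop both MOST channels. */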
static int most_nd_stop(struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);

	netif_stop_queue(dev);
	if (nd->iface->request_netinfo)
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
	most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
	most_stop_channel(nd->iface, nd->tx.ch_id, &comp);

	return 0;
}

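/*
 * Transmit path: grab an MBO for the TX channel, convert the skb into an
 * MDP or MEP packet and submit it. If no MBO is available, the queue is
 * stopped and the packet is retried later (NETDEV_TX_BUSY).
 */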
static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);
	struct mbo *mbo;
	int ret;

	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);

	if (!mbo) {
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	if (nd->is_mamac)
		ret = skb_to_mamac(skb, mbo);
	else
		ret = skb_to_mep(skb, mbo);

	if (ret) {
		most_put_mbo(mbo);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	most_submit_mbo(mbo);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops most_nd_ops = {
	.ndo_open = most_nd_open,
	.ndo_stop = most_nd_stop,
	.ndo_start_xmit = most_nd_start_xmit,
	.ndo_set_mac_address = most_nd_set_mac_address,
};

static void most_nd_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &most_nd_ops;
}

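/* Look up the context for @iface; caller holds probe_disc_mt or list_lock. */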
static struct net_dev_context *get_net_dev(struct most_interface *iface)
{
	struct net_dev_context *nd;

	list_for_each_entry(nd, &net_devices, list)
		if (nd->iface == iface)
			return nd;
	return NULL;
}

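/*
 * Like get_net_dev(), but only returns a fully linked context and takes a
 * reference on its net_device; the caller must drop it with dev_put().
 */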
static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
{
	struct net_dev_context *nd;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	nd = get_net_dev(iface);
	if (nd && nd->rx.linked && nd->tx.linked)
		dev_hold(nd->dev);
	else
		nd = NULL;
	spin_unlock_irqrestore(&list_lock, flags);
	return nd;
}

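/*
 * Called when a MOST channel is linked to this component. The first channel
 * of an interface allocates the net_device; the second channel (opposite
 * direction) registers it.
 */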
static int comp_probe_channel(struct most_interface *iface, int channel_idx,
			      struct most_channel_config *ccfg, char *name,
			      char *args)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;
	struct net_device *dev;
	unsigned long flags;
	int ret = 0;

	if (!iface)
		return -EINVAL;

	if (ccfg->data_type != MOST_CH_ASYNC)
		return -EINVAL;

	mutex_lock(&probe_disc_mt);
	nd = get_net_dev(iface);
	if (!nd) {
		dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
				   NET_NAME_UNKNOWN, most_nd_setup);
		if (!dev) {
			ret = -ENOMEM;
			goto unlock;
		}

		nd = netdev_priv(dev);
		nd->iface = iface;
		nd->dev = dev;

		spin_lock_irqsave(&list_lock, flags);
		list_add(&nd->list, &net_devices);
		spin_unlock_irqrestore(&list_lock, flags);

		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
	} else {
		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
		if (ch->linked) {
			pr_err("direction is allocated\n");
			ret = -EINVAL;
			goto unlock;
		}

		if (register_netdev(nd->dev)) {
			pr_err("register_netdev() failed\n");
			ret = -EINVAL;
			goto unlock;
		}
	}
	ch->ch_id = channel_idx;
	ch->linked = true;

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

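/*
 * Called when a channel is unlinked. If both channels were still linked,
 * the net_device is unregistered (ndo_stop() then closes the channels);
 * when the remaining channel is unlinked, the context is removed from the
 * list and freed.
 */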
static int comp_disconnect_channel(struct most_interface *iface,
				   int channel_idx)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&probe_disc_mt);
	nd = get_net_dev(iface);
	if (!nd) {
		ret = -EINVAL;
		goto unlock;
	}

	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
		ch = &nd->rx;
	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
		ch = &nd->tx;
	} else {
		ret = -EINVAL;
		goto unlock;
	}

	if (nd->rx.linked && nd->tx.linked) {
		spin_lock_irqsave(&list_lock, flags);
		ch->linked = false;
		spin_unlock_irqrestore(&list_lock, flags);

		/*
		 * do not call most_stop_channel() here, because channels are
		 * going to be closed in ndo_stop() after unregister_netdev()
		 */
		unregister_netdev(nd->dev);
	} else {
		spin_lock_irqsave(&list_lock, flags);
		list_del(&nd->list);
		spin_unlock_irqrestore(&list_lock, flags);

		free_netdev(nd->dev);
	}

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}

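/* TX completion: an MBO became available again, so wake the queue. */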
static int comp_resume_tx_channel(struct most_interface *iface,
				  int channel_idx)
{
	struct net_dev_context *nd;

	nd = get_net_dev_hold(iface);
	if (!nd)
		return 0;

	if (nd->tx.ch_id != channel_idx)
		goto put_nd;

	netif_wake_queue(nd->dev);

put_nd:
	dev_put(nd->dev);
	return 0;
}

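/*
 * RX completion: validate the PMS header, rebuild an Ethernet frame (MAMAC
 * packets get a synthetic Ethernet header) and hand it to the network stack.
 */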
static int comp_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;
	int ret = 0;

	nd = get_net_dev_hold(mbo->ifp);
	if (!nd)
		return -EIO;

	if (nd->rx.ch_id != mbo->hdm_channel_id) {
		ret = -EIO;
		goto put_nd;
	}

	dev = nd->dev;

	if (nd->is_mamac) {
		if (!pms_is_mamac(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* dest */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* src */
		skb_put_data(skb, &zero, 4);
		skb_put_data(skb, buf + 5, 2);

		/* eth type */
		skb_put_data(skb, buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);

put_nd:
	dev_put(nd->dev);
	return ret;
}

static struct most_component comp = {
	.mod = THIS_MODULE,
	.name = "net",
	.probe_channel = comp_probe_channel,
	.disconnect_channel = comp_disconnect_channel,
	.tx_completion = comp_resume_tx_channel,
	.rx_completion = comp_rx_data,
};

static int __init most_net_init(void)
{
	int err;

	err = most_register_component(&comp);
	if (err)
		return err;
	err = most_register_configfs_subsys(&comp);
	if (err) {
		most_deregister_component(&comp);
		return err;
	}
	return 0;
}

static void __exit most_net_exit(void)
{
	most_deregister_configfs_subsys(&comp);
	most_deregister_component(&comp);
}

/**
 * on_netinfo - callback for the HDM to report link status and the HW's MAC
 * @iface: most interface instance
 * @link_stat: link status
 * @mac_addr: MAC address
 */
static void on_netinfo(struct most_interface *iface,
		       unsigned char link_stat, unsigned char *mac_addr)
{
	struct net_dev_context *nd;
	struct net_device *dev;
	const u8 *m = mac_addr;

	nd = get_net_dev_hold(iface);
	if (!nd)
		return;

	dev = nd->dev;

	if (link_stat)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (m && is_valid_ether_addr(m)) {
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_info(dev, "set mac %pM\n", m);
			eth_hw_addr_set(dev, m);
			netif_dormant_off(dev);
		} else if (!ether_addr_equal(dev->dev_addr, m)) {
			netdev_warn(dev, "reject mac %pM\n", m);
		}
	}

	dev_put(nd->dev);
}

module_init(most_net_init);
module_exit(most_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("Networking Component Module for Mostcore");