// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
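/*
 * A vxcan pair is typically created and moved across namespaces with
 * iproute2 (assuming a version that knows the vxcan link type), e.g.:
 *
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns <ns>
 *
 * CAN frames sent on one end of the pair are received on the other end.
 */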

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

struct vxcan_priv {
	struct net_device __rcu	*peer;
};

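/* Forward a CAN frame from one end of the tunnel to the other: the skb is
 * prepared with can_create_echo_skb(), retargeted at the peer device and
 * handed to netif_rx_ni(), so it shows up as a received frame on the peer.
 */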
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	u8 len;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	len = cfd->len;
	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

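/* The carrier is only reported as up while both ends of the tunnel are up:
 * opening one side turns the carrier on for both devices once the peer is
 * already running, and closing either side turns it off again.
 */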
static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

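/* Report the peer's ifindex as the iflink (IFLA_LINK) value so that
 * userspace can see which interface this device is tunnelled to.
 */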
static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

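/* Only the two valid CAN frame MTUs are accepted: CAN_MTU for Classical
 * CAN and CANFD_MTU for CAN FD.
 */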
static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};

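/* Set up one end of the tunnel as a CAN FD capable virtual CAN device
 * (no ARP, MTU defaults to CANFD_MTU). The CAN midlayer private data
 * (struct can_ml_priv) is placed directly behind the aligned
 * struct vxcan_priv inside the netdev private area; vxcan_link_ops below
 * reserves room for both via priv_size.
 */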
static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= (IFF_NOARP|IFF_ECHO);
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

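/* Create the device pair: the peer device is registered first (possibly in
 * a different network namespace, as selected by the netlink request), then
 * the device the rtnetlink request was issued for, and finally the two
 * vxcan_priv::peer pointers are cross linked.
 */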
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

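/* Both ends of the tunnel are torn down together: deleting either device
 * also queues its peer for unregistration.
 */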
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before a synchronize_rcu() point. The devices are guaranteed
	 * not to be freed before one RCU grace period has elapsed.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

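/* rtnetlink glue for the "vxcan" link type. priv_size reserves room for
 * struct vxcan_priv plus the CAN midlayer private data laid out in
 * vxcan_setup() above.
 */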
static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);