// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
#include <net/rps.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags, so that bit is
 * overloaded to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000
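
/* TUN_VNET_LE and TUN_VNET_BE select the byte order of the virtio-net
 * header; userspace toggles them with the TUNSETVNETLE ioctl and, when
 * the kernel is built with CONFIG_TUN_VNET_CROSS_LE, TUNSETVNETBE.
 * See tun_is_little_endian() below.
 */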

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
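
/* TUN_FEATURES are the flag bits userspace may request, together with
 * the device type, when creating a device with TUNSETIFF. A minimal,
 * purely illustrative userspace sketch (error handling omitted; the
 * name "tap0" is an example):
 *
 *	struct ifreq ifr = { 0 };
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	snprintf(ifr.ifr_name, IFNAMSIZ, "tap0");
 *	ioctl(fd, TUNSETIFF, &ifr);
 */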

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to the maximum number of vCPUs in a guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)
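
/* A flow entry that has not been updated for TUN_FLOW_EXPIRE jiffies
 * (3 seconds) is reclaimed by the garbage-collection timer; see
 * tun_flow_cleanup() below.
 */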

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * needed to serve as one transmit queue for the tuntap device. The sock_fprog
 * and tap_filter are kept in tun_struct since they are used for filtering on
 * the netdevice, not for a specific queue (at least I didn't see a requirement
 * for that).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
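
/* The flow table size must remain a power of two so that
 * TUN_MASK_FLOW_ENTRIES can reduce a hash to a bucket index in
 * tun_hashfn() with a single AND.
 */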

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, to preserve the behavior of persistent devices.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};
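
/* struct veth mirrors the (proto, TCI) pair of an 802.1Q tag; it is
 * used to account for the VLAN header when a tagged skb is passed to
 * userspace.
 */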

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

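/* NAPI receive helper for IFF_NAPI queues: splice the packets queued
 * on the tfile socket's sk_write_queue off under the queue lock, feed
 * up to @budget of them to GRO, and put any excess back at the head
 * of the queue for the next poll.
 */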
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

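/* Flow GC timer callback: drop entries that have been idle for longer
 * than tun->ageing_time and, if any entries remain, re-arm the timer
 * for the earliest upcoming expiry.
 */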
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

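/* Record that flow @rxhash was last seen on @tfile's queue: refresh
 * the matching flow entry, or create one (arming the GC timer) if the
 * flow is new and the table is not full.
 */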
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check the rxq no. is that some cards (e.g. the 82599)
 * choose the rxq based on the txq where the last packet of the flow
 * was sent. As the userspace application moves between processors,
 * we may get a different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq, numqueues;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		txq = reciprocal_scale(txq, numqueues);
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

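/* Detach @tfile from its tun device, compacting tun->tfiles[] around
 * the removed queue. With @clean the queue's resources are released
 * and a non-persistent device may be unregistered; otherwise the
 * queue is parked on tun->disabled for a later re-attach. Caller must
 * hold the rtnl lock.
 */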
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		ntfile->xdp_rxq.queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();

	if (clean)
		sock_put(&tfile->sk);
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

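/* Attach the queue backed by @file to @tun, or re-enable a previously
 * disabled queue. Called with the rtnl lock held; fails when the
 * device is single-queue and already attached or when the queue limit
 * would be exceeded.
 */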
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to a persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed; any
	 * unicast address will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}
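
/* Userspace installs this filter with TUNSETTXFILTER, passing a
 * struct tun_filter immediately followed by uf.count MAC addresses.
 * An illustrative sketch (error handling omitted; mcast0, mcast1 and
 * tap_fd are placeholders):
 *
 *	char buf[sizeof(struct tun_filter) + 2 * ETH_ALEN];
 *	struct tun_filter *f = (struct tun_filter *)buf;
 *
 *	f->flags = 0;
 *	f->count = 2;
 *	memcpy(f->addr[0], mcast0, ETH_ALEN);
 *	memcpy(f->addr[1], mcast1, ETH_ALEN);
 *	ioctl(tap_fd, TUNSETTXFILTER, f);
 */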

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_header() is
	 * incorrect at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

static int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct ifreq *ifr = tun->ifr;
	int err;

	spin_lock_init(&tun->lock);

	err = security_tun_dev_alloc_security(&tun->security);
	if (err < 0)
		return err;

	tun_flow_init(tun);

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->features = dev->hw_features | NETIF_F_LLTX;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		      (ifr->ifr_flags & TUN_FEATURES);

	INIT_LIST_HEAD(&tun->disabled);
	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
	if (err < 0) {
		tun_flow_uninit(tun);
		security_tun_dev_free_security(tun->security);
		return err;
	}
	return 0;
}

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	enum skb_drop_reason drop_reason;
	int txq = skb->queue_mapping;
	struct netdev_queue *queue;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile) {
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * The filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb)) {
		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
		goto drop;
	}

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0) {
		drop_reason = SKB_DROP_REASON_TAP_FILTER;
		goto drop;
	}

	if (pskb_trim(skb, len)) {
		drop_reason = SKB_DROP_REASON_NOMEM;
		goto drop;
	}

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto drop;
	}

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}

	/* NETIF_F_LLTX requires us to do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	txq_trans_cond_update(queue);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev_core_stats_tx_dropped_inc(dev);
	skb_tx_error(skb);
	kfree_skb_reason(skb, drop_reason);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct tun_struct *tun = netdev_priv(dev);

	dev_get_tstats64(dev, stats);

	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

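/* ndo_xdp_xmit handler: produce up to @n XDP frames onto the tx ring
 * of a queue picked by the current CPU id and return the number
 * actually queued; frames that did not fit are left for the caller to
 * free.
 */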
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int nxmit = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can distinguish an XDP buffer from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			dev_core_stats_tx_dropped_inc(dev);
			break;
		}
		nxmit++;
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return nxmit;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
	int nxmit;

	if (unlikely(!frame))
		return -EOVERFLOW;

	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
	if (!nxmit)
		xdp_return_frame_rx_napi(frame);
	return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_initialize(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		/* Currently tun does not support XDP, only tap does. */
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee that EPOLLOUT will be raised either here or in
	 * tun_sock_write_space(), so the process still gets a
	 * notification after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
		return ERR_PTR(-EMSGSIZE);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		const struct iovec *iov = iter_iov(it) + i;
		size_t fragsz = iov->iov_len;
		struct page *page;
		void *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE)
		linear = len;

	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

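/* Deliver an skb to the stack, optionally coalescing: when
 * tun->rx_batched is set and the writer indicated more packets are
 * coming, skbs are parked on sk_write_queue and flushed as one batch
 * once it fills up or the "more" hint ends.
 */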
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			return err;
		}
		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			return err;
		}
		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		dev_core_stats_rx_dropped_inc(tun->dev);
		break;
	}

	return act;
}

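/* Fast receive path: copy the datagram into a page fragment, run the
 * attached XDP program (if any) on the raw buffer, and only then
 * build an skb around it. Returns NULL when XDP consumed the packet,
 * or an ERR_PTR on failure.
 */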
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window in which an XDP program may be set after
	 * the check of xdp_prog above; this should be rare, and for
	 * simplicity we do XDP on the skb in case the headroom is not
	 * enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, false);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1819 		/* Packets that are not easy to process here (e.g. GSO or
1820 		 * jumbo packets) are handled after the skb has been created,
1821 		 * by the generic XDP routine.
1822 		 */
1823 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1824 		err = PTR_ERR_OR_ZERO(skb);
1825 		if (err)
1826 			goto drop;
1827 		if (!skb)
1828 			return total_len;
1829 	} else {
1830 		if (!zerocopy) {
1831 			copylen = len;
1832 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1833 				linear = good_linear;
1834 			else
1835 				linear = tun16_to_cpu(tun, gso.hdr_len);
1836 		}
1837 
1838 		if (frags) {
1839 			mutex_lock(&tfile->napi_mutex);
1840 			skb = tun_napi_alloc_frags(tfile, copylen, from);
1841 			/* tun_napi_alloc_frags() enforces a layout for the skb.
1842 			 * If zerocopy is enabled, then this layout will be
1843 			 * overwritten by zerocopy_sg_from_iter().
1844 			 */
1845 			zerocopy = false;
1846 		} else {
1847 			if (!linear)
1848 				linear = min_t(size_t, good_linear, copylen);
1849 
1850 			skb = tun_alloc_skb(tfile, align, copylen, linear,
1851 					    noblock);
1852 		}
1853 
1854 		err = PTR_ERR_OR_ZERO(skb);
1855 		if (err)
1856 			goto drop;
1857 
1858 		if (zerocopy)
1859 			err = zerocopy_sg_from_iter(skb, from);
1860 		else
1861 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1862 
1863 		if (err) {
1864 			err = -EFAULT;
1865 			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1866 			goto drop;
1867 		}
1868 	}
1869 
1870 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1871 		atomic_long_inc(&tun->rx_frame_errors);
1872 		err = -EINVAL;
1873 		goto free_skb;
1874 	}
1875 
1876 	switch (tun->flags & TUN_TYPE_MASK) {
1877 	case IFF_TUN:
1878 		if (tun->flags & IFF_NO_PI) {
1879 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1880 
1881 			switch (ip_version) {
1882 			case 4:
1883 				pi.proto = htons(ETH_P_IP);
1884 				break;
1885 			case 6:
1886 				pi.proto = htons(ETH_P_IPV6);
1887 				break;
1888 			default:
1889 				err = -EINVAL;
1890 				goto drop;
1891 			}
1892 		}
1893 
1894 		skb_reset_mac_header(skb);
1895 		skb->protocol = pi.proto;
1896 		skb->dev = tun->dev;
1897 		break;
1898 	case IFF_TAP:
1899 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1900 			err = -ENOMEM;
1901 			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
1902 			goto drop;
1903 		}
1904 		skb->protocol = eth_type_trans(skb, tun->dev);
1905 		break;
1906 	}
1907 
1908 	/* Copy the ubuf_info for the completion callback when the skb has no error */
1909 	if (zerocopy) {
1910 		skb_zcopy_init(skb, msg_control);
1911 	} else if (msg_control) {
1912 		struct ubuf_info *uarg = msg_control;
1913 		uarg->ops->complete(NULL, uarg, false);
1914 	}
1915 
1916 	skb_reset_network_header(skb);
1917 	skb_probe_transport_header(skb);
1918 	skb_record_rx_queue(skb, tfile->queue_index);
1919 
1920 	if (skb_xdp) {
1921 		struct bpf_prog *xdp_prog;
1922 		int ret;
1923 
1924 		local_bh_disable();
1925 		rcu_read_lock();
1926 		xdp_prog = rcu_dereference(tun->xdp_prog);
1927 		if (xdp_prog) {
1928 			ret = do_xdp_generic(xdp_prog, &skb);
1929 			if (ret != XDP_PASS) {
1930 				rcu_read_unlock();
1931 				local_bh_enable();
1932 				goto unlock_frags;
1933 			}
1934 		}
1935 		rcu_read_unlock();
1936 		local_bh_enable();
1937 	}
1938 
1939 	/* Compute the costly rx hash only if needed for flow updates.
1940 	 * There is a very small possibility of out-of-order delivery during
1941 	 * a switch, which is not worth optimizing for.
1942 	 */
1943 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1944 	    !tfile->detached)
1945 		rxhash = __skb_get_hash_symmetric(skb);
1946 
1947 	rcu_read_lock();
1948 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1949 		err = -EIO;
1950 		rcu_read_unlock();
1951 		drop_reason = SKB_DROP_REASON_DEV_READY;
1952 		goto drop;
1953 	}
1954 
1955 	if (frags) {
1956 		u32 headlen;
1957 
1958 		/* Exercise flow dissector code path. */
1959 		skb_push(skb, ETH_HLEN);
1960 		headlen = eth_get_headlen(tun->dev, skb->data,
1961 					  skb_headlen(skb));
1962 
1963 		if (unlikely(headlen > skb_headlen(skb))) {
1964 			WARN_ON_ONCE(1);
1965 			err = -ENOMEM;
1966 			dev_core_stats_rx_dropped_inc(tun->dev);
1967 napi_busy:
1968 			napi_free_frags(&tfile->napi);
1969 			rcu_read_unlock();
1970 			mutex_unlock(&tfile->napi_mutex);
1971 			return err;
1972 		}
1973 
1974 		if (likely(napi_schedule_prep(&tfile->napi))) {
1975 			local_bh_disable();
1976 			napi_gro_frags(&tfile->napi);
1977 			napi_complete(&tfile->napi);
1978 			local_bh_enable();
1979 		} else {
1980 			err = -EBUSY;
1981 			goto napi_busy;
1982 		}
1983 		mutex_unlock(&tfile->napi_mutex);
1984 	} else if (tfile->napi_enabled) {
1985 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1986 		int queue_len;
1987 
1988 		spin_lock_bh(&queue->lock);
1989 
1990 		if (unlikely(tfile->detached)) {
1991 			spin_unlock_bh(&queue->lock);
1992 			rcu_read_unlock();
1993 			err = -EBUSY;
1994 			goto free_skb;
1995 		}
1996 
1997 		__skb_queue_tail(queue, skb);
1998 		queue_len = skb_queue_len(queue);
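		/* Editor's note: plain spin_unlock() on purpose; BHs stay
		 * disabled (from spin_lock_bh() above) until the
		 * local_bh_enable() below, so napi_schedule() runs with
		 * BHs off.
		 */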
1999 		spin_unlock(&queue->lock);
2000 
2001 		if (!more || queue_len > NAPI_POLL_WEIGHT)
2002 			napi_schedule(&tfile->napi);
2003 
2004 		local_bh_enable();
2005 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2006 		tun_rx_batched(tun, tfile, skb, more);
2007 	} else {
2008 		netif_rx(skb);
2009 	}
2010 	rcu_read_unlock();
2011 
2012 	preempt_disable();
2013 	dev_sw_netstats_rx_add(tun->dev, len);
2014 	preempt_enable();
2015 
2016 	if (rxhash)
2017 		tun_flow_update(tun, rxhash, tfile);
2018 
2019 	return total_len;
2020 
2021 drop:
2022 	if (err != -EAGAIN)
2023 		dev_core_stats_rx_dropped_inc(tun->dev);
2024 
2025 free_skb:
2026 	if (!IS_ERR_OR_NULL(skb))
2027 		kfree_skb_reason(skb, drop_reason);
2028 
2029 unlock_frags:
2030 	if (frags) {
2031 		tfile->napi.skb = NULL;
2032 		mutex_unlock(&tfile->napi_mutex);
2033 	}
2034 
2035 	return err ?: total_len;
2036 }
2037 
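/* Editor's sketch (not driver code): the buffer layout tun_get_user()
 * parses on each write.  Unless IFF_NO_PI is set, a struct tun_pi comes
 * first; with IFF_VNET_HDR, a struct virtio_net_hdr (padded out to
 * vnet_hdr_sz bytes) follows it, or starts the buffer under IFF_NO_PI.
 * A minimal userspace write for an IFF_TUN fd without IFF_NO_PI, assuming
 * the usual libc and uapi headers; tun_fd, pkt and pkt_len are
 * placeholders:
 *
 *	struct tun_pi pi = { .flags = 0, .proto = htons(ETH_P_IPV6) };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &pi, .iov_len = sizeof(pi) },
 *		{ .iov_base = pkt, .iov_len = pkt_len },
 *	};
 *
 *	writev(tun_fd, iov, 2);
 *
 * Writes shorter than the expected headers fail with -EINVAL, as checked
 * at the top of tun_get_user().
 */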
2038 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2039 {
2040 	struct file *file = iocb->ki_filp;
2041 	struct tun_file *tfile = file->private_data;
2042 	struct tun_struct *tun = tun_get(tfile);
2043 	ssize_t result;
2044 	int noblock = 0;
2045 
2046 	if (!tun)
2047 		return -EBADFD;
2048 
2049 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2050 		noblock = 1;
2051 
2052 	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2053 
2054 	tun_put(tun);
2055 	return result;
2056 }
2057 
2058 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2059 				struct tun_file *tfile,
2060 				struct xdp_frame *xdp_frame,
2061 				struct iov_iter *iter)
2062 {
2063 	int vnet_hdr_sz = 0;
2064 	size_t size = xdp_frame->len;
2065 	size_t ret;
2066 
2067 	if (tun->flags & IFF_VNET_HDR) {
2068 		struct virtio_net_hdr gso = { 0 };
2069 
2070 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2071 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2072 			return -EINVAL;
2073 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2074 			     sizeof(gso)))
2075 			return -EFAULT;
2076 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2077 	}
2078 
2079 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2080 
2081 	preempt_disable();
2082 	dev_sw_netstats_tx_add(tun->dev, 1, ret);
2083 	preempt_enable();
2084 
2085 	return ret;
2086 }
2087 
2088 /* Put packet to the user space buffer */
2089 static ssize_t tun_put_user(struct tun_struct *tun,
2090 			    struct tun_file *tfile,
2091 			    struct sk_buff *skb,
2092 			    struct iov_iter *iter)
2093 {
2094 	struct tun_pi pi = { 0, skb->protocol };
2095 	ssize_t total;
2096 	int vlan_offset = 0;
2097 	int vlan_hlen = 0;
2098 	int vnet_hdr_sz = 0;
2099 
2100 	if (skb_vlan_tag_present(skb))
2101 		vlan_hlen = VLAN_HLEN;
2102 
2103 	if (tun->flags & IFF_VNET_HDR)
2104 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2105 
2106 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2107 
2108 	if (!(tun->flags & IFF_NO_PI)) {
2109 		if (iov_iter_count(iter) < sizeof(pi))
2110 			return -EINVAL;
2111 
2112 		total += sizeof(pi);
2113 		if (iov_iter_count(iter) < total) {
2114 			/* Packet will be stripped */
2115 			pi.flags |= TUN_PKT_STRIP;
2116 		}
2117 
2118 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2119 			return -EFAULT;
2120 	}
2121 
2122 	if (vnet_hdr_sz) {
2123 		struct virtio_net_hdr gso;
2124 
2125 		if (iov_iter_count(iter) < vnet_hdr_sz)
2126 			return -EINVAL;
2127 
2128 		if (virtio_net_hdr_from_skb(skb, &gso,
2129 					    tun_is_little_endian(tun), true,
2130 					    vlan_hlen)) {
2131 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2132 
2133 			if (net_ratelimit()) {
2134 				netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2135 					   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2136 					   tun16_to_cpu(tun, gso.hdr_len));
2137 				print_hex_dump(KERN_ERR, "tun: ",
2138 					       DUMP_PREFIX_NONE,
2139 					       16, 1, skb->head,
2140 					       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2141 			}
2142 			WARN_ON_ONCE(1);
2143 			return -EINVAL;
2144 		}
2145 
2146 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2147 			return -EFAULT;
2148 
2149 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2150 	}
2151 
2152 	if (vlan_hlen) {
2153 		int ret;
2154 		struct veth veth;
2155 
2156 		veth.h_vlan_proto = skb->vlan_proto;
2157 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2158 
2159 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2160 
2161 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2162 		if (ret || !iov_iter_count(iter))
2163 			goto done;
2164 
2165 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2166 		if (ret != sizeof(veth) || !iov_iter_count(iter))
2167 			goto done;
2168 	}
2169 
2170 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2171 
2172 done:
2173 	/* Caller is in process context; disable preemption for the per-CPU stats update */
2174 	preempt_disable();
2175 	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2176 	preempt_enable();
2177 
2178 	return total;
2179 }
2180 
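/* Editor's sketch (not driver code): the read-side counterpart of the
 * layout produced by tun_put_user() above.  Unless IFF_NO_PI is set, a
 * struct tun_pi precedes the frame; if the user buffer is too small the
 * frame is truncated and TUN_PKT_STRIP is set in pi.flags.  tun_fd is a
 * placeholder for an attached fd opened without IFF_NO_PI:
 *
 *	unsigned char buf[4096];
 *	ssize_t n = read(tun_fd, buf, sizeof(buf));
 *	struct tun_pi *pi = (struct tun_pi *)buf;
 *
 *	if (n > (ssize_t)sizeof(*pi) && (pi->flags & TUN_PKT_STRIP))
 *		fprintf(stderr, "frame was truncated\n");
 */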
2181 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2182 {
2183 	DECLARE_WAITQUEUE(wait, current);
2184 	void *ptr = NULL;
2185 	int error = 0;
2186 
2187 	ptr = ptr_ring_consume(&tfile->tx_ring);
2188 	if (ptr)
2189 		goto out;
2190 	if (noblock) {
2191 		error = -EAGAIN;
2192 		goto out;
2193 	}
2194 
2195 	add_wait_queue(&tfile->socket.wq.wait, &wait);
2196 
2197 	while (1) {
2198 		set_current_state(TASK_INTERRUPTIBLE);
2199 		ptr = ptr_ring_consume(&tfile->tx_ring);
2200 		if (ptr)
2201 			break;
2202 		if (signal_pending(current)) {
2203 			error = -ERESTARTSYS;
2204 			break;
2205 		}
2206 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2207 			error = -EFAULT;
2208 			break;
2209 		}
2210 
2211 		schedule();
2212 	}
2213 
2214 	__set_current_state(TASK_RUNNING);
2215 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2216 
2217 out:
2218 	*err = error;
2219 	return ptr;
2220 }
2221 
2222 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2223 			   struct iov_iter *to,
2224 			   int noblock, void *ptr)
2225 {
2226 	ssize_t ret;
2227 	int err;
2228 
2229 	if (!iov_iter_count(to)) {
2230 		tun_ptr_free(ptr);
2231 		return 0;
2232 	}
2233 
2234 	if (!ptr) {
2235 		/* Read frames from ring */
2236 		ptr = tun_ring_recv(tfile, noblock, &err);
2237 		if (!ptr)
2238 			return err;
2239 	}
2240 
2241 	if (tun_is_xdp_frame(ptr)) {
2242 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2243 
2244 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2245 		xdp_return_frame(xdpf);
2246 	} else {
2247 		struct sk_buff *skb = ptr;
2248 
2249 		ret = tun_put_user(tun, tfile, skb, to);
2250 		if (unlikely(ret < 0))
2251 			kfree_skb(skb);
2252 		else
2253 			consume_skb(skb);
2254 	}
2255 
2256 	return ret;
2257 }
2258 
2259 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2260 {
2261 	struct file *file = iocb->ki_filp;
2262 	struct tun_file *tfile = file->private_data;
2263 	struct tun_struct *tun = tun_get(tfile);
2264 	ssize_t len = iov_iter_count(to), ret;
2265 	int noblock = 0;
2266 
2267 	if (!tun)
2268 		return -EBADFD;
2269 
2270 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2271 		noblock = 1;
2272 
2273 	ret = tun_do_read(tun, tfile, to, noblock, NULL);
2274 	ret = min_t(ssize_t, ret, len);
2275 	if (ret > 0)
2276 		iocb->ki_pos = ret;
2277 	tun_put(tun);
2278 	return ret;
2279 }
2280 
2281 static void tun_prog_free(struct rcu_head *rcu)
2282 {
2283 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2284 
2285 	bpf_prog_destroy(prog->prog);
2286 	kfree(prog);
2287 }
2288 
2289 static int __tun_set_ebpf(struct tun_struct *tun,
2290 			  struct tun_prog __rcu **prog_p,
2291 			  struct bpf_prog *prog)
2292 {
2293 	struct tun_prog *old, *new = NULL;
2294 
2295 	if (prog) {
2296 		new = kmalloc(sizeof(*new), GFP_KERNEL);
2297 		if (!new)
2298 			return -ENOMEM;
2299 		new->prog = prog;
2300 	}
2301 
2302 	spin_lock_bh(&tun->lock);
2303 	old = rcu_dereference_protected(*prog_p,
2304 					lockdep_is_held(&tun->lock));
2305 	rcu_assign_pointer(*prog_p, new);
2306 	spin_unlock_bh(&tun->lock);
2307 
2308 	if (old)
2309 		call_rcu(&old->rcu, tun_prog_free);
2310 
2311 	return 0;
2312 }
2313 
2314 static void tun_free_netdev(struct net_device *dev)
2315 {
2316 	struct tun_struct *tun = netdev_priv(dev);
2317 
2318 	BUG_ON(!(list_empty(&tun->disabled)));
2319 
2320 	tun_flow_uninit(tun);
2321 	security_tun_dev_free_security(tun->security);
2322 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2323 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2324 }
2325 
2326 static void tun_setup(struct net_device *dev)
2327 {
2328 	struct tun_struct *tun = netdev_priv(dev);
2329 
2330 	tun->owner = INVALID_UID;
2331 	tun->group = INVALID_GID;
2332 	tun_default_link_ksettings(dev, &tun->link_ksettings);
2333 
2334 	dev->ethtool_ops = &tun_ethtool_ops;
2335 	dev->needs_free_netdev = true;
2336 	dev->priv_destructor = tun_free_netdev;
2337 	/* We prefer our own queue length */
2338 	dev->tx_queue_len = TUN_READQ_SIZE;
2339 }
2340 
2341 /* Trivial set of netlink ops to allow deleting a tun or tap
2342  * device via netlink.
2343  */
2344 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2345 			struct netlink_ext_ack *extack)
2346 {
2347 	NL_SET_ERR_MSG(extack,
2348 		       "tun/tap creation via rtnetlink is not supported.");
2349 	return -EOPNOTSUPP;
2350 }
2351 
2352 static size_t tun_get_size(const struct net_device *dev)
2353 {
2354 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2355 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2356 
2357 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2358 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2359 	       nla_total_size(sizeof(u8)) + /* TYPE */
2360 	       nla_total_size(sizeof(u8)) + /* PI */
2361 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2362 	       nla_total_size(sizeof(u8)) + /* PERSIST */
2363 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2364 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2365 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2366 	       0;
2367 }
2368 
2369 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2370 {
2371 	struct tun_struct *tun = netdev_priv(dev);
2372 
2373 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2374 		goto nla_put_failure;
2375 	if (uid_valid(tun->owner) &&
2376 	    nla_put_u32(skb, IFLA_TUN_OWNER,
2377 			from_kuid_munged(current_user_ns(), tun->owner)))
2378 		goto nla_put_failure;
2379 	if (gid_valid(tun->group) &&
2380 	    nla_put_u32(skb, IFLA_TUN_GROUP,
2381 			from_kgid_munged(current_user_ns(), tun->group)))
2382 		goto nla_put_failure;
2383 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2384 		goto nla_put_failure;
2385 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2386 		goto nla_put_failure;
2387 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2388 		goto nla_put_failure;
2389 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2390 		       !!(tun->flags & IFF_MULTI_QUEUE)))
2391 		goto nla_put_failure;
2392 	if (tun->flags & IFF_MULTI_QUEUE) {
2393 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2394 			goto nla_put_failure;
2395 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2396 				tun->numdisabled))
2397 			goto nla_put_failure;
2398 	}
2399 
2400 	return 0;
2401 
2402 nla_put_failure:
2403 	return -EMSGSIZE;
2404 }
2405 
2406 static struct rtnl_link_ops tun_link_ops __read_mostly = {
2407 	.kind		= DRV_NAME,
2408 	.priv_size	= sizeof(struct tun_struct),
2409 	.setup		= tun_setup,
2410 	.validate	= tun_validate,
2411 	.get_size       = tun_get_size,
2412 	.fill_info      = tun_fill_info,
2413 };
2414 
2415 static void tun_sock_write_space(struct sock *sk)
2416 {
2417 	struct tun_file *tfile;
2418 	wait_queue_head_t *wqueue;
2419 
2420 	if (!sock_writeable(sk))
2421 		return;
2422 
2423 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2424 		return;
2425 
2426 	wqueue = sk_sleep(sk);
2427 	if (wqueue && waitqueue_active(wqueue))
2428 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2429 						EPOLLWRNORM | EPOLLWRBAND);
2430 
2431 	tfile = container_of(sk, struct tun_file, sk);
2432 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2433 }
2434 
2435 static void tun_put_page(struct tun_page *tpage)
2436 {
2437 	if (tpage->page)
2438 		__page_frag_cache_drain(tpage->page, tpage->count);
2439 }
2440 
2441 static int tun_xdp_one(struct tun_struct *tun,
2442 		       struct tun_file *tfile,
2443 		       struct xdp_buff *xdp, int *flush,
2444 		       struct tun_page *tpage)
2445 {
2446 	unsigned int datasize = xdp->data_end - xdp->data;
2447 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2448 	struct virtio_net_hdr *gso = &hdr->gso;
2449 	struct bpf_prog *xdp_prog;
2450 	struct sk_buff *skb = NULL;
2451 	struct sk_buff_head *queue;
2452 	u32 rxhash = 0, act;
2453 	int buflen = hdr->buflen;
2454 	int ret = 0;
2455 	bool skb_xdp = false;
2456 	struct page *page;
2457 
2458 	if (unlikely(datasize < ETH_HLEN))
2459 		return -EINVAL;
2460 
2461 	xdp_prog = rcu_dereference(tun->xdp_prog);
2462 	if (xdp_prog) {
2463 		if (gso->gso_type) {
2464 			skb_xdp = true;
2465 			goto build;
2466 		}
2467 
2468 		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2469 		xdp_set_data_meta_invalid(xdp);
2470 
2471 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2472 		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2473 		if (ret < 0) {
2474 			put_page(virt_to_head_page(xdp->data));
2475 			return ret;
2476 		}
2477 
2478 		switch (ret) {
2479 		case XDP_REDIRECT:
2480 			*flush = true;
2481 			fallthrough;
2482 		case XDP_TX:
2483 			return 0;
2484 		case XDP_PASS:
2485 			break;
2486 		default:
2487 			page = virt_to_head_page(xdp->data);
2488 			if (tpage->page == page) {
2489 				++tpage->count;
2490 			} else {
2491 				tun_put_page(tpage);
2492 				tpage->page = page;
2493 				tpage->count = 1;
2494 			}
2495 			return 0;
2496 		}
2497 	}
2498 
2499 build:
2500 	skb = build_skb(xdp->data_hard_start, buflen);
2501 	if (!skb) {
2502 		ret = -ENOMEM;
2503 		goto out;
2504 	}
2505 
2506 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2507 	skb_put(skb, xdp->data_end - xdp->data);
2508 
2509 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2510 		atomic_long_inc(&tun->rx_frame_errors);
2511 		kfree_skb(skb);
2512 		ret = -EINVAL;
2513 		goto out;
2514 	}
2515 
2516 	skb->protocol = eth_type_trans(skb, tun->dev);
2517 	skb_reset_network_header(skb);
2518 	skb_probe_transport_header(skb);
2519 	skb_record_rx_queue(skb, tfile->queue_index);
2520 
2521 	if (skb_xdp) {
2522 		ret = do_xdp_generic(xdp_prog, &skb);
2523 		if (ret != XDP_PASS) {
2524 			ret = 0;
2525 			goto out;
2526 		}
2527 	}
2528 
2529 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2530 	    !tfile->detached)
2531 		rxhash = __skb_get_hash_symmetric(skb);
2532 
2533 	if (tfile->napi_enabled) {
2534 		queue = &tfile->sk.sk_write_queue;
2535 		spin_lock(&queue->lock);
2536 
2537 		if (unlikely(tfile->detached)) {
2538 			spin_unlock(&queue->lock);
2539 			kfree_skb(skb);
2540 			return -EBUSY;
2541 		}
2542 
2543 		__skb_queue_tail(queue, skb);
2544 		spin_unlock(&queue->lock);
2545 		ret = 1;
2546 	} else {
2547 		netif_receive_skb(skb);
2548 		ret = 0;
2549 	}
2550 
2551 	/* No need to disable preemption here since this function is
2552 	 * always called with bh disabled
2553 	 */
2554 	dev_sw_netstats_rx_add(tun->dev, datasize);
2555 
2556 	if (rxhash)
2557 		tun_flow_update(tun, rxhash, tfile);
2558 
2559 out:
2560 	return ret;
2561 }
2562 
2563 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2564 {
2565 	int ret, i;
2566 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2567 	struct tun_struct *tun = tun_get(tfile);
2568 	struct tun_msg_ctl *ctl = m->msg_control;
2569 	struct xdp_buff *xdp;
2570 
2571 	if (!tun)
2572 		return -EBADFD;
2573 
2574 	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2575 	    ctl && ctl->type == TUN_MSG_PTR) {
2576 		struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
2577 		struct tun_page tpage;
2578 		int n = ctl->num;
2579 		int flush = 0, queued = 0;
2580 
2581 		memset(&tpage, 0, sizeof(tpage));
2582 
2583 		local_bh_disable();
2584 		rcu_read_lock();
2585 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
2586 
2587 		for (i = 0; i < n; i++) {
2588 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2589 			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2590 			if (ret > 0)
2591 				queued += ret;
2592 		}
2593 
2594 		if (flush)
2595 			xdp_do_flush();
2596 
2597 		if (tfile->napi_enabled && queued > 0)
2598 			napi_schedule(&tfile->napi);
2599 
2600 		bpf_net_ctx_clear(bpf_net_ctx);
2601 		rcu_read_unlock();
2602 		local_bh_enable();
2603 
2604 		tun_put_page(&tpage);
2605 
2606 		ret = total_len;
2607 		goto out;
2608 	}
2609 
2610 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2611 			   m->msg_flags & MSG_DONTWAIT,
2612 			   m->msg_flags & MSG_MORE);
2613 out:
2614 	tun_put(tun);
2615 	return ret;
2616 }
2617 
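/* Editor's sketch (in-kernel, illustrative only): the TUN_MSG_PTR
 * contract checked in tun_sendmsg() above.  A caller such as vhost-net
 * passes an array of struct xdp_buff via msg_control; xdp_bufs, n and
 * tun_sock (obtained via tun_get_socket()) are placeholders:
 *
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = n,
 *		.ptr  = xdp_bufs,
 *	};
 *	struct msghdr msg = {
 *		.msg_control	= &ctl,
 *		.msg_controllen	= sizeof(ctl),
 *	};
 *
 *	sock_sendmsg(tun_sock, &msg);
 */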
2618 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2619 		       int flags)
2620 {
2621 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2622 	struct tun_struct *tun = tun_get(tfile);
2623 	void *ptr = m->msg_control;
2624 	int ret;
2625 
2626 	if (!tun) {
2627 		ret = -EBADFD;
2628 		goto out_free;
2629 	}
2630 
2631 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2632 		ret = -EINVAL;
2633 		goto out_put_tun;
2634 	}
2635 	if (flags & MSG_ERRQUEUE) {
2636 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2637 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2638 		goto out;
2639 	}
2640 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2641 	if (ret > (ssize_t)total_len) {
2642 		m->msg_flags |= MSG_TRUNC;
2643 		ret = flags & MSG_TRUNC ? ret : total_len;
2644 	}
2645 out:
2646 	tun_put(tun);
2647 	return ret;
2648 
2649 out_put_tun:
2650 	tun_put(tun);
2651 out_free:
2652 	tun_ptr_free(ptr);
2653 	return ret;
2654 }
2655 
2656 static int tun_ptr_peek_len(void *ptr)
2657 {
2658 	if (likely(ptr)) {
2659 		if (tun_is_xdp_frame(ptr)) {
2660 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2661 
2662 			return xdpf->len;
2663 		}
2664 		return __skb_array_len_with_tag(ptr);
2665 	} else {
2666 		return 0;
2667 	}
2668 }
2669 
2670 static int tun_peek_len(struct socket *sock)
2671 {
2672 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2673 	struct tun_struct *tun;
2674 	int ret = 0;
2675 
2676 	tun = tun_get(tfile);
2677 	if (!tun)
2678 		return 0;
2679 
2680 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2681 	tun_put(tun);
2682 
2683 	return ret;
2684 }
2685 
2686 /* Ops structure to mimic raw sockets with tun */
2687 static const struct proto_ops tun_socket_ops = {
2688 	.peek_len = tun_peek_len,
2689 	.sendmsg = tun_sendmsg,
2690 	.recvmsg = tun_recvmsg,
2691 };
2692 
2693 static struct proto tun_proto = {
2694 	.name		= "tun",
2695 	.owner		= THIS_MODULE,
2696 	.obj_size	= sizeof(struct tun_file),
2697 };
2698 
2699 static int tun_flags(struct tun_struct *tun)
2700 {
2701 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2702 }
2703 
2704 static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2705 			      char *buf)
2706 {
2707 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2708 	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2709 }
2710 
2711 static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2712 			  char *buf)
2713 {
2714 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2715 	return uid_valid(tun->owner) ?
2716 		sysfs_emit(buf, "%u\n",
2717 			   from_kuid_munged(current_user_ns(), tun->owner)) :
2718 		sysfs_emit(buf, "-1\n");
2719 }
2720 
2721 static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2722 			  char *buf)
2723 {
2724 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2725 	return gid_valid(tun->group) ?
2726 		sysfs_emit(buf, "%u\n",
2727 			   from_kgid_munged(current_user_ns(), tun->group)) :
2728 		sysfs_emit(buf, "-1\n");
2729 }
2730 
2731 static DEVICE_ATTR_RO(tun_flags);
2732 static DEVICE_ATTR_RO(owner);
2733 static DEVICE_ATTR_RO(group);
2734 
2735 static struct attribute *tun_dev_attrs[] = {
2736 	&dev_attr_tun_flags.attr,
2737 	&dev_attr_owner.attr,
2738 	&dev_attr_group.attr,
2739 	NULL
2740 };
2741 
2742 static const struct attribute_group tun_attr_group = {
2743 	.attrs = tun_dev_attrs
2744 };
2745 
2746 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2747 {
2748 	struct tun_struct *tun;
2749 	struct tun_file *tfile = file->private_data;
2750 	struct net_device *dev;
2751 	int err;
2752 
2753 	if (tfile->detached)
2754 		return -EINVAL;
2755 
2756 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2757 		if (!capable(CAP_NET_ADMIN))
2758 			return -EPERM;
2759 
2760 		if (!(ifr->ifr_flags & IFF_NAPI) ||
2761 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2762 			return -EINVAL;
2763 	}
2764 
2765 	dev = __dev_get_by_name(net, ifr->ifr_name);
2766 	if (dev) {
2767 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2768 			return -EBUSY;
2769 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2770 			tun = netdev_priv(dev);
2771 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2772 			tun = netdev_priv(dev);
2773 		else
2774 			return -EINVAL;
2775 
2776 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2777 		    !!(tun->flags & IFF_MULTI_QUEUE))
2778 			return -EINVAL;
2779 
2780 		if (tun_not_capable(tun))
2781 			return -EPERM;
2782 		err = security_tun_dev_open(tun->security);
2783 		if (err < 0)
2784 			return err;
2785 
2786 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2787 				 ifr->ifr_flags & IFF_NAPI,
2788 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2789 		if (err < 0)
2790 			return err;
2791 
2792 		if (tun->flags & IFF_MULTI_QUEUE &&
2793 		    (tun->numqueues + tun->numdisabled > 1)) {
2794 			/* One or more queues have already been attached; no need
2795 			 * to initialize the device again.
2796 			 */
2797 			netdev_state_change(dev);
2798 			return 0;
2799 		}
2800 
2801 		tun->flags = (tun->flags & ~TUN_FEATURES) |
2802 			      (ifr->ifr_flags & TUN_FEATURES);
2803 
2804 		netdev_state_change(dev);
2805 	} else {
2806 		char *name;
2807 		unsigned long flags = 0;
2808 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2809 			     MAX_TAP_QUEUES : 1;
2810 
2811 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2812 			return -EPERM;
2813 		err = security_tun_dev_create();
2814 		if (err < 0)
2815 			return err;
2816 
2817 		/* Set dev type */
2818 		if (ifr->ifr_flags & IFF_TUN) {
2819 			/* TUN device */
2820 			flags |= IFF_TUN;
2821 			name = "tun%d";
2822 		} else if (ifr->ifr_flags & IFF_TAP) {
2823 			/* TAP device */
2824 			flags |= IFF_TAP;
2825 			name = "tap%d";
2826 		} else
2827 			return -EINVAL;
2828 
2829 		if (*ifr->ifr_name)
2830 			name = ifr->ifr_name;
2831 
2832 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2833 				       NET_NAME_UNKNOWN, tun_setup, queues,
2834 				       queues);
2835 
2836 		if (!dev)
2837 			return -ENOMEM;
2838 
2839 		dev_net_set(dev, net);
2840 		dev->rtnl_link_ops = &tun_link_ops;
2841 		dev->ifindex = tfile->ifindex;
2842 		dev->sysfs_groups[0] = &tun_attr_group;
2843 
2844 		tun = netdev_priv(dev);
2845 		tun->dev = dev;
2846 		tun->flags = flags;
2847 		tun->txflt.count = 0;
2848 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2849 
2850 		tun->align = NET_SKB_PAD;
2851 		tun->filter_attached = false;
2852 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2853 		tun->rx_batched = 0;
2854 		RCU_INIT_POINTER(tun->steering_prog, NULL);
2855 
2856 		tun->ifr = ifr;
2857 		tun->file = file;
2858 
2859 		tun_net_initialize(dev);
2860 
2861 		err = register_netdevice(tun->dev);
2862 		if (err < 0) {
2863 			free_netdev(dev);
2864 			return err;
2865 		}
2866 		/* free_netdev() won't check the refcnt; to avoid a race
2867 		 * with dev_put() we need to publish tun after registration.
2868 		 */
2869 		rcu_assign_pointer(tfile->tun, tun);
2870 	}
2871 
2872 	if (ifr->ifr_flags & IFF_NO_CARRIER)
2873 		netif_carrier_off(tun->dev);
2874 	else
2875 		netif_carrier_on(tun->dev);
2876 
2877 	/* Make sure persistent devices do not get stuck in
2878 	 * xoff state.
2879 	 */
2880 	if (netif_running(tun->dev))
2881 		netif_tx_wake_all_queues(tun->dev);
2882 
2883 	strcpy(ifr->ifr_name, tun->dev->name);
2884 	return 0;
2885 }
2886 
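/* Editor's sketch (not driver code): the canonical userspace counterpart
 * of tun_set_iff().  Assuming <fcntl.h>, <string.h>, <unistd.h>,
 * <sys/ioctl.h>, <linux/if.h> and <linux/if_tun.h>; the name "tap0" is a
 * placeholder and may be left empty so the kernel picks "tap%d":
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	if (fd < 0)
 *		return -1;
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
 *	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *		close(fd);
 *		return -1;
 *	}
 *
 * On success, ifr.ifr_name holds the (possibly kernel-chosen) device
 * name, copied back by the strcpy() at the end of tun_set_iff().
 */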
2887 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2888 {
2889 	strcpy(ifr->ifr_name, tun->dev->name);
2890 
2891 	ifr->ifr_flags = tun_flags(tun);
2892 
2893 }
2894 
2895 /* This is like a cut-down ethtool ops, except done via tun fd so no
2896  * privs required. */
2897 static int set_offload(struct tun_struct *tun, unsigned long arg)
2898 {
2899 	netdev_features_t features = 0;
2900 
2901 	if (arg & TUN_F_CSUM) {
2902 		features |= NETIF_F_HW_CSUM;
2903 		arg &= ~TUN_F_CSUM;
2904 
2905 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2906 			if (arg & TUN_F_TSO_ECN) {
2907 				features |= NETIF_F_TSO_ECN;
2908 				arg &= ~TUN_F_TSO_ECN;
2909 			}
2910 			if (arg & TUN_F_TSO4)
2911 				features |= NETIF_F_TSO;
2912 			if (arg & TUN_F_TSO6)
2913 				features |= NETIF_F_TSO6;
2914 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2915 		}
2916 
2917 		arg &= ~TUN_F_UFO;
2918 
2919 		/* TODO: for now, USO4 and USO6 can only be enabled together */
2920 		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
2921 			features |= NETIF_F_GSO_UDP_L4;
2922 			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
2923 		}
2924 	}
2925 
2926 	/* This gives the user a way to test for new features in the future by
2927 	 * trying to set them. */
2928 	if (arg)
2929 		return -EINVAL;
2930 
2931 	tun->set_features = features;
2932 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2933 	tun->dev->wanted_features |= features;
2934 	netdev_update_features(tun->dev);
2935 
2936 	return 0;
2937 }
2938 
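/* Editor's sketch (not driver code): set_offload() is reached via the
 * TUNSETOFFLOAD ioctl, which passes the TUN_F_* bits as the argument
 * value itself rather than through a pointer.  tap_fd is a placeholder:
 *
 *	unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(tap_fd, TUNSETOFFLOAD, off) < 0)
 *		perror("TUNSETOFFLOAD");
 *
 * Unknown bits make the call fail with -EINVAL, which is what lets
 * userspace probe for new features, per the comment in set_offload().
 */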
2939 static void tun_detach_filter(struct tun_struct *tun, int n)
2940 {
2941 	int i;
2942 	struct tun_file *tfile;
2943 
2944 	for (i = 0; i < n; i++) {
2945 		tfile = rtnl_dereference(tun->tfiles[i]);
2946 		lock_sock(tfile->socket.sk);
2947 		sk_detach_filter(tfile->socket.sk);
2948 		release_sock(tfile->socket.sk);
2949 	}
2950 
2951 	tun->filter_attached = false;
2952 }
2953 
2954 static int tun_attach_filter(struct tun_struct *tun)
2955 {
2956 	int i, ret = 0;
2957 	struct tun_file *tfile;
2958 
2959 	for (i = 0; i < tun->numqueues; i++) {
2960 		tfile = rtnl_dereference(tun->tfiles[i]);
2961 		lock_sock(tfile->socket.sk);
2962 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2963 		release_sock(tfile->socket.sk);
2964 		if (ret) {
2965 			tun_detach_filter(tun, i);
2966 			return ret;
2967 		}
2968 	}
2969 
2970 	tun->filter_attached = true;
2971 	return ret;
2972 }
2973 
2974 static void tun_set_sndbuf(struct tun_struct *tun)
2975 {
2976 	struct tun_file *tfile;
2977 	int i;
2978 
2979 	for (i = 0; i < tun->numqueues; i++) {
2980 		tfile = rtnl_dereference(tun->tfiles[i]);
2981 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2982 	}
2983 }
2984 
2985 static int tun_set_queue(struct file *file, struct ifreq *ifr)
2986 {
2987 	struct tun_file *tfile = file->private_data;
2988 	struct tun_struct *tun;
2989 	int ret = 0;
2990 
2991 	rtnl_lock();
2992 
2993 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2994 		tun = tfile->detached;
2995 		if (!tun) {
2996 			ret = -EINVAL;
2997 			goto unlock;
2998 		}
2999 		ret = security_tun_dev_attach_queue(tun->security);
3000 		if (ret < 0)
3001 			goto unlock;
3002 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
3003 				 tun->flags & IFF_NAPI_FRAGS, true);
3004 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
3005 		tun = rtnl_dereference(tfile->tun);
3006 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
3007 			ret = -EINVAL;
3008 		else
3009 			__tun_detach(tfile, false);
3010 	} else
3011 		ret = -EINVAL;
3012 
3013 	if (ret >= 0)
3014 		netdev_state_change(tun->dev);
3015 
3016 unlock:
3017 	rtnl_unlock();
3018 	return ret;
3019 }
3020 
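/* Editor's sketch (not driver code): detaching and re-attaching one
 * queue of a multi-queue device via TUNSETQUEUE, matching
 * tun_set_queue() above.  queue_fd is a placeholder for an fd previously
 * attached with TUNSETIFF and IFF_MULTI_QUEUE:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);
 *
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);
 */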
3021 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3022 			void __user *data)
3023 {
3024 	struct bpf_prog *prog;
3025 	int fd;
3026 
3027 	if (copy_from_user(&fd, data, sizeof(fd)))
3028 		return -EFAULT;
3029 
3030 	if (fd == -1) {
3031 		prog = NULL;
3032 	} else {
3033 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3034 		if (IS_ERR(prog))
3035 			return PTR_ERR(prog);
3036 	}
3037 
3038 	return __tun_set_ebpf(tun, prog_p, prog);
3039 }
3040 
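/* Editor's sketch (not driver code): TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF take a pointer to an int holding a BPF program fd of
 * type BPF_PROG_TYPE_SOCKET_FILTER, as enforced above; an fd of -1
 * detaches the program.  prog_fd and tun_fd are placeholders, with
 * prog_fd obtained from a BPF loader such as libbpf:
 *
 *	int none = -1;
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 *	...
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &none);
 */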
3041 /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3042 static unsigned char tun_get_addr_len(unsigned short type)
3043 {
3044 	switch (type) {
3045 	case ARPHRD_IP6GRE:
3046 	case ARPHRD_TUNNEL6:
3047 		return sizeof(struct in6_addr);
3048 	case ARPHRD_IPGRE:
3049 	case ARPHRD_TUNNEL:
3050 	case ARPHRD_SIT:
3051 		return 4;
3052 	case ARPHRD_ETHER:
3053 		return ETH_ALEN;
3054 	case ARPHRD_IEEE802154:
3055 	case ARPHRD_IEEE802154_MONITOR:
3056 		return IEEE802154_EXTENDED_ADDR_LEN;
3057 	case ARPHRD_PHONET_PIPE:
3058 	case ARPHRD_PPP:
3059 	case ARPHRD_NONE:
3060 		return 0;
3061 	case ARPHRD_6LOWPAN:
3062 		return EUI64_ADDR_LEN;
3063 	case ARPHRD_FDDI:
3064 		return FDDI_K_ALEN;
3065 	case ARPHRD_HIPPI:
3066 		return HIPPI_ALEN;
3067 	case ARPHRD_IEEE802:
3068 		return FC_ALEN;
3069 	case ARPHRD_ROSE:
3070 		return ROSE_ADDR_LEN;
3071 	case ARPHRD_NETROM:
3072 		return AX25_ADDR_LEN;
3073 	case ARPHRD_LOCALTLK:
3074 		return LTALK_ALEN;
3075 	default:
3076 		return 0;
3077 	}
3078 }
3079 
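/* Editor's sketch (not driver code): tun_get_addr_len() backs the
 * TUNSETLINK ioctl handled below, which only succeeds while the
 * interface is down.  tun_fd is a placeholder; ARPHRD_* values come
 * from <linux/if_arp.h>:
 *
 *	ioctl(tun_fd, TUNSETLINK, ARPHRD_NONE);
 */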
3080 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3081 			    unsigned long arg, int ifreq_len)
3082 {
3083 	struct tun_file *tfile = file->private_data;
3084 	struct net *net = sock_net(&tfile->sk);
3085 	struct tun_struct *tun;
3086 	void __user *argp = (void __user *)arg;
3087 	unsigned int carrier;
3088 	struct ifreq ifr;
3089 	kuid_t owner;
3090 	kgid_t group;
3091 	int ifindex;
3092 	int sndbuf;
3093 	int vnet_hdr_sz;
3094 	int le;
3095 	int ret;
3096 	bool do_notify = false;
3097 
3098 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3099 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3100 		if (copy_from_user(&ifr, argp, ifreq_len))
3101 			return -EFAULT;
3102 	} else {
3103 		memset(&ifr, 0, sizeof(ifr));
3104 	}
3105 	if (cmd == TUNGETFEATURES) {
3106 		/* Currently this just means: "what IFF flags are valid?".
3107 		 * This is needed because we never checked for invalid flags on
3108 		 * TUNSETIFF.
3109 		 */
3110 		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3111 				TUN_FEATURES, (unsigned int __user *)argp);
3112 	} else if (cmd == TUNSETQUEUE) {
3113 		return tun_set_queue(file, &ifr);
3114 	} else if (cmd == SIOCGSKNS) {
3115 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3116 			return -EPERM;
3117 		return open_related_ns(&net->ns, get_net_ns);
3118 	}
3119 
3120 	rtnl_lock();
3121 
3122 	tun = tun_get(tfile);
3123 	if (cmd == TUNSETIFF) {
3124 		ret = -EEXIST;
3125 		if (tun)
3126 			goto unlock;
3127 
3128 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3129 
3130 		ret = tun_set_iff(net, file, &ifr);
3131 
3132 		if (ret)
3133 			goto unlock;
3134 
3135 		if (copy_to_user(argp, &ifr, ifreq_len))
3136 			ret = -EFAULT;
3137 		goto unlock;
3138 	}
3139 	if (cmd == TUNSETIFINDEX) {
3140 		ret = -EPERM;
3141 		if (tun)
3142 			goto unlock;
3143 
3144 		ret = -EFAULT;
3145 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3146 			goto unlock;
3147 		ret = -EINVAL;
3148 		if (ifindex < 0)
3149 			goto unlock;
3150 		ret = 0;
3151 		tfile->ifindex = ifindex;
3152 		goto unlock;
3153 	}
3154 
3155 	ret = -EBADFD;
3156 	if (!tun)
3157 		goto unlock;
3158 
3159 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3160 
3161 	net = dev_net(tun->dev);
3162 	ret = 0;
3163 	switch (cmd) {
3164 	case TUNGETIFF:
3165 		tun_get_iff(tun, &ifr);
3166 
3167 		if (tfile->detached)
3168 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3169 		if (!tfile->socket.sk->sk_filter)
3170 			ifr.ifr_flags |= IFF_NOFILTER;
3171 
3172 		if (copy_to_user(argp, &ifr, ifreq_len))
3173 			ret = -EFAULT;
3174 		break;
3175 
3176 	case TUNSETNOCSUM:
3177 		/* Disable/Enable checksum */
3178 
3179 		/* [unimplemented] */
3180 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3181 			   arg ? "disabled" : "enabled");
3182 		break;
3183 
3184 	case TUNSETPERSIST:
3185 		/* Disable/Enable persist mode. Keep an extra reference to the
3186 		 * module to prevent the module from being unloaded.
3187 		 */
3188 		if (arg && !(tun->flags & IFF_PERSIST)) {
3189 			tun->flags |= IFF_PERSIST;
3190 			__module_get(THIS_MODULE);
3191 			do_notify = true;
3192 		}
3193 		if (!arg && (tun->flags & IFF_PERSIST)) {
3194 			tun->flags &= ~IFF_PERSIST;
3195 			module_put(THIS_MODULE);
3196 			do_notify = true;
3197 		}
3198 
3199 		netif_info(tun, drv, tun->dev, "persist %s\n",
3200 			   arg ? "enabled" : "disabled");
3201 		break;
3202 
3203 	case TUNSETOWNER:
3204 		/* Set owner of the device */
3205 		owner = make_kuid(current_user_ns(), arg);
3206 		if (!uid_valid(owner)) {
3207 			ret = -EINVAL;
3208 			break;
3209 		}
3210 		tun->owner = owner;
3211 		do_notify = true;
3212 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
3213 			   from_kuid(&init_user_ns, tun->owner));
3214 		break;
3215 
3216 	case TUNSETGROUP:
3217 		/* Set group of the device */
3218 		group = make_kgid(current_user_ns(), arg);
3219 		if (!gid_valid(group)) {
3220 			ret = -EINVAL;
3221 			break;
3222 		}
3223 		tun->group = group;
3224 		do_notify = true;
3225 		netif_info(tun, drv, tun->dev, "group set to %u\n",
3226 			   from_kgid(&init_user_ns, tun->group));
3227 		break;
3228 
3229 	case TUNSETLINK:
3230 		/* Only allow setting the type when the interface is down */
3231 		if (tun->dev->flags & IFF_UP) {
3232 			netif_info(tun, drv, tun->dev,
3233 				   "Linktype set failed because interface is up\n");
3234 			ret = -EBUSY;
3235 		} else {
3236 			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3237 						       tun->dev);
3238 			ret = notifier_to_errno(ret);
3239 			if (ret) {
3240 				netif_info(tun, drv, tun->dev,
3241 					   "Refused to change device type\n");
3242 				break;
3243 			}
3244 			tun->dev->type = (int) arg;
3245 			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3246 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3247 				   tun->dev->type);
3248 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3249 						 tun->dev);
3250 		}
3251 		break;
3252 
3253 	case TUNSETDEBUG:
3254 		tun->msg_enable = (u32)arg;
3255 		break;
3256 
3257 	case TUNSETOFFLOAD:
3258 		ret = set_offload(tun, arg);
3259 		break;
3260 
3261 	case TUNSETTXFILTER:
3262 		/* Can be set only for TAPs */
3263 		ret = -EINVAL;
3264 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3265 			break;
3266 		ret = update_filter(&tun->txflt, (void __user *)arg);
3267 		break;
3268 
3269 	case SIOCGIFHWADDR:
3270 		/* Get hw address */
3271 		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3272 		if (copy_to_user(argp, &ifr, ifreq_len))
3273 			ret = -EFAULT;
3274 		break;
3275 
3276 	case SIOCSIFHWADDR:
3277 		/* Set hw address */
3278 		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3279 		break;
3280 
3281 	case TUNGETSNDBUF:
3282 		sndbuf = tfile->socket.sk->sk_sndbuf;
3283 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3284 			ret = -EFAULT;
3285 		break;
3286 
3287 	case TUNSETSNDBUF:
3288 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3289 			ret = -EFAULT;
3290 			break;
3291 		}
3292 		if (sndbuf <= 0) {
3293 			ret = -EINVAL;
3294 			break;
3295 		}
3296 
3297 		tun->sndbuf = sndbuf;
3298 		tun_set_sndbuf(tun);
3299 		break;
3300 
3301 	case TUNGETVNETHDRSZ:
3302 		vnet_hdr_sz = tun->vnet_hdr_sz;
3303 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3304 			ret = -EFAULT;
3305 		break;
3306 
3307 	case TUNSETVNETHDRSZ:
3308 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3309 			ret = -EFAULT;
3310 			break;
3311 		}
3312 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3313 			ret = -EINVAL;
3314 			break;
3315 		}
3316 
3317 		tun->vnet_hdr_sz = vnet_hdr_sz;
3318 		break;
3319 
3320 	case TUNGETVNETLE:
3321 		le = !!(tun->flags & TUN_VNET_LE);
3322 		if (put_user(le, (int __user *)argp))
3323 			ret = -EFAULT;
3324 		break;
3325 
3326 	case TUNSETVNETLE:
3327 		if (get_user(le, (int __user *)argp)) {
3328 			ret = -EFAULT;
3329 			break;
3330 		}
3331 		if (le)
3332 			tun->flags |= TUN_VNET_LE;
3333 		else
3334 			tun->flags &= ~TUN_VNET_LE;
3335 		break;
3336 
3337 	case TUNGETVNETBE:
3338 		ret = tun_get_vnet_be(tun, argp);
3339 		break;
3340 
3341 	case TUNSETVNETBE:
3342 		ret = tun_set_vnet_be(tun, argp);
3343 		break;
3344 
3345 	case TUNATTACHFILTER:
3346 		/* Can be set only for TAPs */
3347 		ret = -EINVAL;
3348 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3349 			break;
3350 		ret = -EFAULT;
3351 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3352 			break;
3353 
3354 		ret = tun_attach_filter(tun);
3355 		break;
3356 
3357 	case TUNDETACHFILTER:
3358 		/* Can be set only for TAPs */
3359 		ret = -EINVAL;
3360 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3361 			break;
3362 		ret = 0;
3363 		tun_detach_filter(tun, tun->numqueues);
3364 		break;
3365 
3366 	case TUNGETFILTER:
3367 		ret = -EINVAL;
3368 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3369 			break;
3370 		ret = -EFAULT;
3371 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3372 			break;
3373 		ret = 0;
3374 		break;
3375 
3376 	case TUNSETSTEERINGEBPF:
3377 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3378 		break;
3379 
3380 	case TUNSETFILTEREBPF:
3381 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3382 		break;
3383 
3384 	case TUNSETCARRIER:
3385 		ret = -EFAULT;
3386 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3387 			goto unlock;
3388 
3389 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3390 		break;
3391 
3392 	case TUNGETDEVNETNS:
3393 		ret = -EPERM;
3394 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3395 			goto unlock;
3396 		ret = open_related_ns(&net->ns, get_net_ns);
3397 		break;
3398 
3399 	default:
3400 		ret = -EINVAL;
3401 		break;
3402 	}
3403 
3404 	if (do_notify)
3405 		netdev_state_change(tun->dev);
3406 
3407 unlock:
3408 	rtnl_unlock();
3409 	if (tun)
3410 		tun_put(tun);
3411 	return ret;
3412 }
3413 
3414 static long tun_chr_ioctl(struct file *file,
3415 			  unsigned int cmd, unsigned long arg)
3416 {
3417 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3418 }
3419 
3420 #ifdef CONFIG_COMPAT
3421 static long tun_chr_compat_ioctl(struct file *file,
3422 			 unsigned int cmd, unsigned long arg)
3423 {
3424 	switch (cmd) {
3425 	case TUNSETIFF:
3426 	case TUNGETIFF:
3427 	case TUNSETTXFILTER:
3428 	case TUNGETSNDBUF:
3429 	case TUNSETSNDBUF:
3430 	case SIOCGIFHWADDR:
3431 	case SIOCSIFHWADDR:
3432 		arg = (unsigned long)compat_ptr(arg);
3433 		break;
3434 	default:
3435 		arg = (compat_ulong_t)arg;
3436 		break;
3437 	}
3438 
3439 	/*
3440 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3441 	 * the end of that structure. All fields that are used in this
3442 	 * driver are compatible though, we don't need to convert the
3443 	 * contents.
3444 	 */
3445 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3446 }
3447 #endif /* CONFIG_COMPAT */
3448 
3449 static int tun_chr_fasync(int fd, struct file *file, int on)
3450 {
3451 	struct tun_file *tfile = file->private_data;
3452 	int ret;
3453 
3454 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3455 		goto out;
3456 
3457 	if (on) {
3458 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3459 		tfile->flags |= TUN_FASYNC;
3460 	} else
3461 		tfile->flags &= ~TUN_FASYNC;
3462 	ret = 0;
3463 out:
3464 	return ret;
3465 }
3466 
3467 static int tun_chr_open(struct inode *inode, struct file *file)
3468 {
3469 	struct net *net = current->nsproxy->net_ns;
3470 	struct tun_file *tfile;
3471 
3472 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3473 					    &tun_proto, 0);
3474 	if (!tfile)
3475 		return -ENOMEM;
3476 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3477 		sk_free(&tfile->sk);
3478 		return -ENOMEM;
3479 	}
3480 
3481 	mutex_init(&tfile->napi_mutex);
3482 	RCU_INIT_POINTER(tfile->tun, NULL);
3483 	tfile->flags = 0;
3484 	tfile->ifindex = 0;
3485 
3486 	init_waitqueue_head(&tfile->socket.wq.wait);
3487 
3488 	tfile->socket.file = file;
3489 	tfile->socket.ops = &tun_socket_ops;
3490 
3491 	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
3492 
3493 	tfile->sk.sk_write_space = tun_sock_write_space;
3494 	tfile->sk.sk_sndbuf = INT_MAX;
3495 
3496 	file->private_data = tfile;
3497 	INIT_LIST_HEAD(&tfile->next);
3498 
3499 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3500 
3501 	/* tun groks IOCB_NOWAIT just fine, mark it as such */
3502 	file->f_mode |= FMODE_NOWAIT;
3503 	return 0;
3504 }
3505 
3506 static int tun_chr_close(struct inode *inode, struct file *file)
3507 {
3508 	struct tun_file *tfile = file->private_data;
3509 
3510 	tun_detach(tfile, true);
3511 
3512 	return 0;
3513 }
3514 
3515 #ifdef CONFIG_PROC_FS
3516 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3517 {
3518 	struct tun_file *tfile = file->private_data;
3519 	struct tun_struct *tun;
3520 	struct ifreq ifr;
3521 
3522 	memset(&ifr, 0, sizeof(ifr));
3523 
3524 	rtnl_lock();
3525 	tun = tun_get(tfile);
3526 	if (tun)
3527 		tun_get_iff(tun, &ifr);
3528 	rtnl_unlock();
3529 
3530 	if (tun)
3531 		tun_put(tun);
3532 
3533 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3534 }
3535 #endif
3536 
3537 static const struct file_operations tun_fops = {
3538 	.owner	= THIS_MODULE,
3539 	.llseek = no_llseek,
3540 	.read_iter  = tun_chr_read_iter,
3541 	.write_iter = tun_chr_write_iter,
3542 	.poll	= tun_chr_poll,
3543 	.unlocked_ioctl	= tun_chr_ioctl,
3544 #ifdef CONFIG_COMPAT
3545 	.compat_ioctl = tun_chr_compat_ioctl,
3546 #endif
3547 	.open	= tun_chr_open,
3548 	.release = tun_chr_close,
3549 	.fasync = tun_chr_fasync,
3550 #ifdef CONFIG_PROC_FS
3551 	.show_fdinfo = tun_chr_show_fdinfo,
3552 #endif
3553 };
3554 
3555 static struct miscdevice tun_miscdev = {
3556 	.minor = TUN_MINOR,
3557 	.name = "tun",
3558 	.nodename = "net/tun",
3559 	.fops = &tun_fops,
3560 };
3561 
3562 /* ethtool interface */
3563 
3564 static void tun_default_link_ksettings(struct net_device *dev,
3565 				       struct ethtool_link_ksettings *cmd)
3566 {
3567 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3568 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3569 	cmd->base.speed		= SPEED_10000;
3570 	cmd->base.duplex	= DUPLEX_FULL;
3571 	cmd->base.port		= PORT_TP;
3572 	cmd->base.phy_address	= 0;
3573 	cmd->base.autoneg	= AUTONEG_DISABLE;
3574 }
3575 
3576 static int tun_get_link_ksettings(struct net_device *dev,
3577 				  struct ethtool_link_ksettings *cmd)
3578 {
3579 	struct tun_struct *tun = netdev_priv(dev);
3580 
3581 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3582 	return 0;
3583 }
3584 
3585 static int tun_set_link_ksettings(struct net_device *dev,
3586 				  const struct ethtool_link_ksettings *cmd)
3587 {
3588 	struct tun_struct *tun = netdev_priv(dev);
3589 
3590 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3591 	return 0;
3592 }
3593 
3594 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3595 {
3596 	struct tun_struct *tun = netdev_priv(dev);
3597 
3598 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3599 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
3600 
3601 	switch (tun->flags & TUN_TYPE_MASK) {
3602 	case IFF_TUN:
3603 		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3604 		break;
3605 	case IFF_TAP:
3606 		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
3607 		break;
3608 	}
3609 }
3610 
3611 static u32 tun_get_msglevel(struct net_device *dev)
3612 {
3613 	struct tun_struct *tun = netdev_priv(dev);
3614 
3615 	return tun->msg_enable;
3616 }
3617 
3618 static void tun_set_msglevel(struct net_device *dev, u32 value)
3619 {
3620 	struct tun_struct *tun = netdev_priv(dev);
3621 
3622 	tun->msg_enable = value;
3623 }
3624 
3625 static int tun_get_coalesce(struct net_device *dev,
3626 			    struct ethtool_coalesce *ec,
3627 			    struct kernel_ethtool_coalesce *kernel_coal,
3628 			    struct netlink_ext_ack *extack)
3629 {
3630 	struct tun_struct *tun = netdev_priv(dev);
3631 
3632 	ec->rx_max_coalesced_frames = tun->rx_batched;
3633 
3634 	return 0;
3635 }
3636 
3637 static int tun_set_coalesce(struct net_device *dev,
3638 			    struct ethtool_coalesce *ec,
3639 			    struct kernel_ethtool_coalesce *kernel_coal,
3640 			    struct netlink_ext_ack *extack)
3641 {
3642 	struct tun_struct *tun = netdev_priv(dev);
3643 
3644 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3645 		tun->rx_batched = NAPI_POLL_WEIGHT;
3646 	else
3647 		tun->rx_batched = ec->rx_max_coalesced_frames;
3648 
3649 	return 0;
3650 }
3651 
3652 static void tun_get_channels(struct net_device *dev,
3653 			     struct ethtool_channels *channels)
3654 {
3655 	struct tun_struct *tun = netdev_priv(dev);
3656 
3657 	channels->combined_count = tun->numqueues;
3658 	channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
3659 }
3660 
3661 static const struct ethtool_ops tun_ethtool_ops = {
3662 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3663 	.get_drvinfo	= tun_get_drvinfo,
3664 	.get_msglevel	= tun_get_msglevel,
3665 	.set_msglevel	= tun_set_msglevel,
3666 	.get_link	= ethtool_op_get_link,
3667 	.get_channels   = tun_get_channels,
3668 	.get_ts_info	= ethtool_op_get_ts_info,
3669 	.get_coalesce   = tun_get_coalesce,
3670 	.set_coalesce   = tun_set_coalesce,
3671 	.get_link_ksettings = tun_get_link_ksettings,
3672 	.set_link_ksettings = tun_set_link_ksettings,
3673 };
3674 
3675 static int tun_queue_resize(struct tun_struct *tun)
3676 {
3677 	struct net_device *dev = tun->dev;
3678 	struct tun_file *tfile;
3679 	struct ptr_ring **rings;
3680 	int n = tun->numqueues + tun->numdisabled;
3681 	int ret, i;
3682 
3683 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3684 	if (!rings)
3685 		return -ENOMEM;
3686 
3687 	for (i = 0; i < tun->numqueues; i++) {
3688 		tfile = rtnl_dereference(tun->tfiles[i]);
3689 		rings[i] = &tfile->tx_ring;
3690 	}
3691 	list_for_each_entry(tfile, &tun->disabled, next)
3692 		rings[i++] = &tfile->tx_ring;
3693 
3694 	ret = ptr_ring_resize_multiple(rings, n,
3695 				       dev->tx_queue_len, GFP_KERNEL,
3696 				       tun_ptr_free);
3697 
3698 	kfree(rings);
3699 	return ret;
3700 }
3701 
3702 static int tun_device_event(struct notifier_block *unused,
3703 			    unsigned long event, void *ptr)
3704 {
3705 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3706 	struct tun_struct *tun = netdev_priv(dev);
3707 	int i;
3708 
3709 	if (dev->rtnl_link_ops != &tun_link_ops)
3710 		return NOTIFY_DONE;
3711 
3712 	switch (event) {
3713 	case NETDEV_CHANGE_TX_QUEUE_LEN:
3714 		if (tun_queue_resize(tun))
3715 			return NOTIFY_BAD;
3716 		break;
3717 	case NETDEV_UP:
3718 		for (i = 0; i < tun->numqueues; i++) {
3719 			struct tun_file *tfile;
3720 
3721 			tfile = rtnl_dereference(tun->tfiles[i]);
3722 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3723 		}
3724 		break;
3725 	default:
3726 		break;
3727 	}
3728 
3729 	return NOTIFY_DONE;
3730 }
3731 
3732 static struct notifier_block tun_notifier_block __read_mostly = {
3733 	.notifier_call	= tun_device_event,
3734 };
3735 
3736 static int __init tun_init(void)
3737 {
3738 	int ret = 0;
3739 
3740 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3741 
3742 	ret = rtnl_link_register(&tun_link_ops);
3743 	if (ret) {
3744 		pr_err("Can't register link_ops\n");
3745 		goto err_linkops;
3746 	}
3747 
3748 	ret = misc_register(&tun_miscdev);
3749 	if (ret) {
3750 		pr_err("Can't register misc device %d\n", TUN_MINOR);
3751 		goto err_misc;
3752 	}
3753 
3754 	ret = register_netdevice_notifier(&tun_notifier_block);
3755 	if (ret) {
3756 		pr_err("Can't register netdevice notifier\n");
3757 		goto err_notifier;
3758 	}
3759 
3760 	return  0;
3761 
3762 err_notifier:
3763 	misc_deregister(&tun_miscdev);
3764 err_misc:
3765 	rtnl_link_unregister(&tun_link_ops);
3766 err_linkops:
3767 	return ret;
3768 }
3769 
3770 static void __exit tun_cleanup(void)
3771 {
3772 	misc_deregister(&tun_miscdev);
3773 	rtnl_link_unregister(&tun_link_ops);
3774 	unregister_netdevice_notifier(&tun_notifier_block);
3775 }
3776 
3777 /* Get an underlying socket object from tun file.  Returns error unless file is
3778  * attached to a device.  The returned object works like a packet socket, it
3779  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3780  * holding a reference to the file for as long as the socket is in use. */
3781 struct socket *tun_get_socket(struct file *file)
3782 {
3783 	struct tun_file *tfile;
3784 	if (file->f_op != &tun_fops)
3785 		return ERR_PTR(-EINVAL);
3786 	tfile = file->private_data;
3787 	if (!tfile)
3788 		return ERR_PTR(-EBADFD);
3789 	return &tfile->socket;
3790 }
3791 EXPORT_SYMBOL_GPL(tun_get_socket);
3792 
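/* Editor's sketch (in-kernel, illustrative only): how a caller such as
 * vhost-net might consume the socket returned by tun_get_socket(); file
 * and msg are placeholders, and the caller must hold a reference on the
 * file for as long as the socket is in use, per the comment above:
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	return sock_sendmsg(sock, &msg);
 */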
3793 struct ptr_ring *tun_get_tx_ring(struct file *file)
3794 {
3795 	struct tun_file *tfile;
3796 
3797 	if (file->f_op != &tun_fops)
3798 		return ERR_PTR(-EINVAL);
3799 	tfile = file->private_data;
3800 	if (!tfile)
3801 		return ERR_PTR(-EBADFD);
3802 	return &tfile->tx_ring;
3803 }
3804 EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3805 
3806 module_init(tun_init);
3807 module_exit(tun_cleanup);
3808 MODULE_DESCRIPTION(DRV_DESCRIPTION);
3809 MODULE_AUTHOR(DRV_COPYRIGHT);
3810 MODULE_LICENSE("GPL");
3811 MODULE_ALIAS_MISCDEV(TUN_MINOR);
3812 MODULE_ALIAS("devname:net/tun");
3813