xref: /linux/drivers/net/tun.c (revision 1e525507)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  TUN - Universal TUN/TAP device driver.
4  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
5  *
6  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
7  */
8 
9 /*
10  *  Changes:
11  *
12  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
13  *    Add TUNSETLINK ioctl to set the link encapsulation
14  *
15  *  Mark Smith <markzzzsmith@yahoo.com.au>
16  *    Use eth_random_addr() for tap MAC address.
17  *
18  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
19  *    Fixes in packet dropping, queue length setting and queue wakeup.
20  *    Increased default tx queue length.
21  *    Added ethtool API.
22  *    Minor cleanups
23  *
24  *  Daniel Podlejski <underley@underley.eu.org>
25  *    Modifications for 2.3.99-pre5 kernel.
26  */
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 
30 #define DRV_NAME	"tun"
31 #define DRV_VERSION	"1.6"
32 #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
33 #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
34 
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/sched/signal.h>
39 #include <linux/major.h>
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/miscdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/compat.h>
51 #include <linux/if.h>
52 #include <linux/if_arp.h>
53 #include <linux/if_ether.h>
54 #include <linux/if_tun.h>
55 #include <linux/if_vlan.h>
56 #include <linux/crc32.h>
57 #include <linux/math.h>
58 #include <linux/nsproxy.h>
59 #include <linux/virtio_net.h>
60 #include <linux/rcupdate.h>
61 #include <net/net_namespace.h>
62 #include <net/netns/generic.h>
63 #include <net/rtnetlink.h>
64 #include <net/sock.h>
65 #include <net/xdp.h>
66 #include <net/ip_tunnels.h>
67 #include <linux/seq_file.h>
68 #include <linux/uio.h>
69 #include <linux/skb_array.h>
70 #include <linux/bpf.h>
71 #include <linux/bpf_trace.h>
72 #include <linux/mutex.h>
73 #include <linux/ieee802154.h>
74 #include <linux/if_ltalk.h>
75 #include <uapi/linux/if_fddi.h>
76 #include <uapi/linux/if_hippi.h>
77 #include <uapi/linux/if_fc.h>
78 #include <net/ax25.h>
79 #include <net/rose.h>
80 #include <net/6lowpan.h>
81 #include <net/rps.h>
82 
83 #include <linux/uaccess.h>
84 #include <linux/proc_fs.h>
85 
86 static void tun_default_link_ksettings(struct net_device *dev,
87 				       struct ethtool_link_ksettings *cmd);
88 
89 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
90 
91 /* TUN device flags */
92 
93 /* IFF_ATTACH_QUEUE is never stored in device flags, so we
94  * overload it to mean fasync when stored there.
95  */
96 #define TUN_FASYNC	IFF_ATTACH_QUEUE
97 /* High bits in flags field are unused. */
98 #define TUN_VNET_LE     0x80000000
99 #define TUN_VNET_BE     0x40000000
100 
101 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
102 		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
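
/* Illustrative userspace sketch (not part of this driver): creating a
 * multi-queue tap device using several of the TUN_FEATURES flags above.
 * Error handling is elided for brevity.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tap_open(const char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		ioctl(fd, TUNSETIFF, &ifr);
 *		return fd;
 *	}
 */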
103 
104 #define GOODCOPY_LEN 128
105 
106 #define FLT_EXACT_COUNT 8
107 struct tap_filter {
108 	unsigned int    count;    /* Number of addrs. Zero means disabled */
109 	u32             mask[2];  /* Mask of the hashed addrs */
110 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
111 };
112 
113 /* MAX_TAP_QUEUES 256 is chosen to allow the rx/tx queue count to equal
114  * the maximum number of vCPUs in a guest. */
115 #define MAX_TAP_QUEUES 256
116 #define MAX_TAP_FLOWS  4096
117 
118 #define TUN_FLOW_EXPIRE (3 * HZ)
119 
120 /* A tun_file connects an open character device to a tuntap netdevice. It
121  * also contains all socket related structures (except sock_fprog and tap_filter)
122  * to serve as one transmit queue for the tuntap device. The sock_fprog
123  * and tap_filter are kept in tun_struct since they are used to filter
124  * for the netdevice as a whole, not for a specific queue (at least I
125  * didn't see the requirement for this).
126  *
127  * RCU usage:
128  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
129  * other can only be read while rcu_read_lock or rtnl_lock is held.
130  */
131 struct tun_file {
132 	struct sock sk;
133 	struct socket socket;
134 	struct tun_struct __rcu *tun;
135 	struct fasync_struct *fasync;
136 	/* only used for fasync */
137 	unsigned int flags;
138 	union {
139 		u16 queue_index;
140 		unsigned int ifindex;
141 	};
142 	struct napi_struct napi;
143 	bool napi_enabled;
144 	bool napi_frags_enabled;
145 	struct mutex napi_mutex;	/* Protects access to the above napi */
146 	struct list_head next;
147 	struct tun_struct *detached;
148 	struct ptr_ring tx_ring;
149 	struct xdp_rxq_info xdp_rxq;
150 };
151 
152 struct tun_page {
153 	struct page *page;
154 	int count;
155 };
156 
157 struct tun_flow_entry {
158 	struct hlist_node hash_link;
159 	struct rcu_head rcu;
160 	struct tun_struct *tun;
161 
162 	u32 rxhash;
163 	u32 rps_rxhash;
164 	int queue_index;
165 	unsigned long updated ____cacheline_aligned_in_smp;
166 };
167 
168 #define TUN_NUM_FLOW_ENTRIES 1024
169 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
170 
171 struct tun_prog {
172 	struct rcu_head rcu;
173 	struct bpf_prog *prog;
174 };
175 
176 /* Since the socket was moved to tun_file, to preserve the behavior of a
177  * persistent device the socket filter, sndbuf and vnet header size are
178  * restored when a file is attached to the persistent device.
179  */
180 struct tun_struct {
181 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
182 	unsigned int            numqueues;
183 	unsigned int 		flags;
184 	kuid_t			owner;
185 	kgid_t			group;
186 
187 	struct net_device	*dev;
188 	netdev_features_t	set_features;
189 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
190 			  NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)
191 
192 	int			align;
193 	int			vnet_hdr_sz;
194 	int			sndbuf;
195 	struct tap_filter	txflt;
196 	struct sock_fprog	fprog;
197 	/* protected by rtnl lock */
198 	bool			filter_attached;
199 	u32			msg_enable;
200 	spinlock_t lock;
201 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
202 	struct timer_list flow_gc_timer;
203 	unsigned long ageing_time;
204 	unsigned int numdisabled;
205 	struct list_head disabled;
206 	void *security;
207 	u32 flow_count;
208 	u32 rx_batched;
209 	atomic_long_t rx_frame_errors;
210 	struct bpf_prog __rcu *xdp_prog;
211 	struct tun_prog __rcu *steering_prog;
212 	struct tun_prog __rcu *filter_prog;
213 	struct ethtool_link_ksettings link_ksettings;
214 	/* init args */
215 	struct file *file;
216 	struct ifreq *ifr;
217 };
218 
219 struct veth {
220 	__be16 h_vlan_proto;
221 	__be16 h_vlan_TCI;
222 };
223 
224 static void tun_flow_init(struct tun_struct *tun);
225 static void tun_flow_uninit(struct tun_struct *tun);
226 
227 static int tun_napi_receive(struct napi_struct *napi, int budget)
228 {
229 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
230 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
231 	struct sk_buff_head process_queue;
232 	struct sk_buff *skb;
233 	int received = 0;
234 
235 	__skb_queue_head_init(&process_queue);
236 
237 	spin_lock(&queue->lock);
238 	skb_queue_splice_tail_init(queue, &process_queue);
239 	spin_unlock(&queue->lock);
240 
241 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
242 		napi_gro_receive(napi, skb);
243 		++received;
244 	}
245 
246 	if (!skb_queue_empty(&process_queue)) {
247 		spin_lock(&queue->lock);
248 		skb_queue_splice(&process_queue, queue);
249 		spin_unlock(&queue->lock);
250 	}
251 
252 	return received;
253 }
254 
255 static int tun_napi_poll(struct napi_struct *napi, int budget)
256 {
257 	unsigned int received;
258 
259 	received = tun_napi_receive(napi, budget);
260 
261 	if (received < budget)
262 		napi_complete_done(napi, received);
263 
264 	return received;
265 }
266 
267 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
268 			  bool napi_en, bool napi_frags)
269 {
270 	tfile->napi_enabled = napi_en;
271 	tfile->napi_frags_enabled = napi_en && napi_frags;
272 	if (napi_en) {
273 		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
274 		napi_enable(&tfile->napi);
275 	}
276 }
277 
278 static void tun_napi_enable(struct tun_file *tfile)
279 {
280 	if (tfile->napi_enabled)
281 		napi_enable(&tfile->napi);
282 }
283 
284 static void tun_napi_disable(struct tun_file *tfile)
285 {
286 	if (tfile->napi_enabled)
287 		napi_disable(&tfile->napi);
288 }
289 
290 static void tun_napi_del(struct tun_file *tfile)
291 {
292 	if (tfile->napi_enabled)
293 		netif_napi_del(&tfile->napi);
294 }
295 
296 static bool tun_napi_frags_enabled(const struct tun_file *tfile)
297 {
298 	return tfile->napi_frags_enabled;
299 }
300 
301 #ifdef CONFIG_TUN_VNET_CROSS_LE
302 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
303 {
304 	return tun->flags & TUN_VNET_BE ? false :
305 		virtio_legacy_is_little_endian();
306 }
307 
308 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
309 {
310 	int be = !!(tun->flags & TUN_VNET_BE);
311 
312 	if (put_user(be, argp))
313 		return -EFAULT;
314 
315 	return 0;
316 }
317 
318 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
319 {
320 	int be;
321 
322 	if (get_user(be, argp))
323 		return -EFAULT;
324 
325 	if (be)
326 		tun->flags |= TUN_VNET_BE;
327 	else
328 		tun->flags &= ~TUN_VNET_BE;
329 
330 	return 0;
331 }
332 #else
333 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
334 {
335 	return virtio_legacy_is_little_endian();
336 }
337 
338 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
339 {
340 	return -EINVAL;
341 }
342 
343 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
344 {
345 	return -EINVAL;
346 }
347 #endif /* CONFIG_TUN_VNET_CROSS_LE */
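
/* Illustrative userspace sketch (not part of this driver): with
 * CONFIG_TUN_VNET_CROSS_LE enabled, a VMM serving a legacy big-endian
 * guest from a little-endian host can request big-endian vnet headers:
 *
 *	int be = 1;
 *
 *	if (ioctl(fd, TUNSETVNETBE, &be) < 0)
 *		perror("TUNSETVNETBE");	// fails with EINVAL otherwise
 */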
348 
349 static inline bool tun_is_little_endian(struct tun_struct *tun)
350 {
351 	return tun->flags & TUN_VNET_LE ||
352 		tun_legacy_is_little_endian(tun);
353 }
354 
355 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
356 {
357 	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
358 }
359 
360 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
361 {
362 	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
363 }
364 
365 static inline u32 tun_hashfn(u32 rxhash)
366 {
367 	return rxhash & TUN_MASK_FLOW_ENTRIES;
368 }
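
/* For example, tun_hashfn(0x12345678) == 0x278: only the low 10 bits
 * select one of the TUN_NUM_FLOW_ENTRIES (1024) hash buckets.
 */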
369 
370 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
371 {
372 	struct tun_flow_entry *e;
373 
374 	hlist_for_each_entry_rcu(e, head, hash_link) {
375 		if (e->rxhash == rxhash)
376 			return e;
377 	}
378 	return NULL;
379 }
380 
381 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
382 					      struct hlist_head *head,
383 					      u32 rxhash, u16 queue_index)
384 {
385 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
386 
387 	if (e) {
388 		netif_info(tun, tx_queued, tun->dev,
389 			   "create flow: hash %u index %u\n",
390 			   rxhash, queue_index);
391 		e->updated = jiffies;
392 		e->rxhash = rxhash;
393 		e->rps_rxhash = 0;
394 		e->queue_index = queue_index;
395 		e->tun = tun;
396 		hlist_add_head_rcu(&e->hash_link, head);
397 		++tun->flow_count;
398 	}
399 	return e;
400 }
401 
402 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
403 {
404 	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
405 		   e->rxhash, e->queue_index);
406 	hlist_del_rcu(&e->hash_link);
407 	kfree_rcu(e, rcu);
408 	--tun->flow_count;
409 }
410 
411 static void tun_flow_flush(struct tun_struct *tun)
412 {
413 	int i;
414 
415 	spin_lock_bh(&tun->lock);
416 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
417 		struct tun_flow_entry *e;
418 		struct hlist_node *n;
419 
420 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
421 			tun_flow_delete(tun, e);
422 	}
423 	spin_unlock_bh(&tun->lock);
424 }
425 
426 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
427 {
428 	int i;
429 
430 	spin_lock_bh(&tun->lock);
431 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
432 		struct tun_flow_entry *e;
433 		struct hlist_node *n;
434 
435 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
436 			if (e->queue_index == queue_index)
437 				tun_flow_delete(tun, e);
438 		}
439 	}
440 	spin_unlock_bh(&tun->lock);
441 }
442 
443 static void tun_flow_cleanup(struct timer_list *t)
444 {
445 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
446 	unsigned long delay = tun->ageing_time;
447 	unsigned long next_timer = jiffies + delay;
448 	unsigned long count = 0;
449 	int i;
450 
451 	spin_lock(&tun->lock);
452 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
453 		struct tun_flow_entry *e;
454 		struct hlist_node *n;
455 
456 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
457 			unsigned long this_timer;
458 
459 			this_timer = e->updated + delay;
460 			if (time_before_eq(this_timer, jiffies)) {
461 				tun_flow_delete(tun, e);
462 				continue;
463 			}
464 			count++;
465 			if (time_before(this_timer, next_timer))
466 				next_timer = this_timer;
467 		}
468 	}
469 
470 	if (count)
471 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
472 	spin_unlock(&tun->lock);
473 }
474 
475 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
476 			    struct tun_file *tfile)
477 {
478 	struct hlist_head *head;
479 	struct tun_flow_entry *e;
480 	unsigned long delay = tun->ageing_time;
481 	u16 queue_index = tfile->queue_index;
482 
483 	head = &tun->flows[tun_hashfn(rxhash)];
484 
485 	rcu_read_lock();
486 
487 	e = tun_flow_find(head, rxhash);
488 	if (likely(e)) {
489 		/* TODO: keep queueing to old queue until it's empty? */
490 		if (READ_ONCE(e->queue_index) != queue_index)
491 			WRITE_ONCE(e->queue_index, queue_index);
492 		if (e->updated != jiffies)
493 			e->updated = jiffies;
494 		sock_rps_record_flow_hash(e->rps_rxhash);
495 	} else {
496 		spin_lock_bh(&tun->lock);
497 		if (!tun_flow_find(head, rxhash) &&
498 		    tun->flow_count < MAX_TAP_FLOWS)
499 			tun_flow_create(tun, head, rxhash, queue_index);
500 
501 		if (!timer_pending(&tun->flow_gc_timer))
502 			mod_timer(&tun->flow_gc_timer,
503 				  round_jiffies_up(jiffies + delay));
504 		spin_unlock_bh(&tun->lock);
505 	}
506 
507 	rcu_read_unlock();
508 }
509 
510 /* Save the hash received in the stack receive path and update the
511  * flow_hash table accordingly.
512  */
513 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
514 {
515 	if (unlikely(e->rps_rxhash != hash))
516 		e->rps_rxhash = hash;
517 }
518 
519 /* We try to identify a flow through its rxhash. The reason we do not
520  * check the rxq number is that some NICs (e.g. the 82599) choose the
521  * rxq based on the txq on which the last packet of the flow was sent.
522  * As the userspace application moves between processors, we may get a
523  * different rxq number here.
524  */
525 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
526 {
527 	struct tun_flow_entry *e;
528 	u32 txq, numqueues;
529 
530 	numqueues = READ_ONCE(tun->numqueues);
531 
532 	txq = __skb_get_hash_symmetric(skb);
533 	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
534 	if (e) {
535 		tun_flow_save_rps_rxhash(e, txq);
536 		txq = e->queue_index;
537 	} else {
538 		txq = reciprocal_scale(txq, numqueues);
539 	}
540 
541 	return txq;
542 }
543 
544 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
545 {
546 	struct tun_prog *prog;
547 	u32 numqueues;
548 	u16 ret = 0;
549 
550 	numqueues = READ_ONCE(tun->numqueues);
551 	if (!numqueues)
552 		return 0;
553 
554 	prog = rcu_dereference(tun->steering_prog);
555 	if (prog)
556 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
557 
558 	return ret % numqueues;
559 }
560 
561 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
562 			    struct net_device *sb_dev)
563 {
564 	struct tun_struct *tun = netdev_priv(dev);
565 	u16 ret;
566 
567 	rcu_read_lock();
568 	if (rcu_dereference(tun->steering_prog))
569 		ret = tun_ebpf_select_queue(tun, skb);
570 	else
571 		ret = tun_automq_select_queue(tun, skb);
572 	rcu_read_unlock();
573 
574 	return ret;
575 }
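
/* Illustrative userspace sketch (not part of this driver): the automq
 * steering above can be overridden by attaching an eBPF steering
 * program whose return value is taken modulo the queue count:
 *
 *	int prog_fd;	// fd of a loaded BPF_PROG_TYPE_SOCKET_FILTER program
 *
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
 */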
576 
577 static inline bool tun_not_capable(struct tun_struct *tun)
578 {
579 	const struct cred *cred = current_cred();
580 	struct net *net = dev_net(tun->dev);
581 
582 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
583 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
584 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
585 }
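
/* Illustrative userspace sketch (not part of this driver): a privileged
 * process can create a persistent device and hand it to a given user so
 * that later attaches pass the ownership check above without
 * CAP_NET_ADMIN:
 *
 *	ioctl(fd, TUNSETOWNER, uid);
 *	ioctl(fd, TUNSETGROUP, gid);
 *	ioctl(fd, TUNSETPERSIST, 1);
 */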
586 
587 static void tun_set_real_num_queues(struct tun_struct *tun)
588 {
589 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
590 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
591 }
592 
593 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
594 {
595 	tfile->detached = tun;
596 	list_add_tail(&tfile->next, &tun->disabled);
597 	++tun->numdisabled;
598 }
599 
600 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
601 {
602 	struct tun_struct *tun = tfile->detached;
603 
604 	tfile->detached = NULL;
605 	list_del_init(&tfile->next);
606 	--tun->numdisabled;
607 	return tun;
608 }
609 
610 void tun_ptr_free(void *ptr)
611 {
612 	if (!ptr)
613 		return;
614 	if (tun_is_xdp_frame(ptr)) {
615 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
616 
617 		xdp_return_frame(xdpf);
618 	} else {
619 		__skb_array_destroy_skb(ptr);
620 	}
621 }
622 EXPORT_SYMBOL_GPL(tun_ptr_free);
623 
624 static void tun_queue_purge(struct tun_file *tfile)
625 {
626 	void *ptr;
627 
628 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
629 		tun_ptr_free(ptr);
630 
631 	skb_queue_purge(&tfile->sk.sk_write_queue);
632 	skb_queue_purge(&tfile->sk.sk_error_queue);
633 }
634 
635 static void __tun_detach(struct tun_file *tfile, bool clean)
636 {
637 	struct tun_file *ntfile;
638 	struct tun_struct *tun;
639 
640 	tun = rtnl_dereference(tfile->tun);
641 
642 	if (tun && clean) {
643 		if (!tfile->detached)
644 			tun_napi_disable(tfile);
645 		tun_napi_del(tfile);
646 	}
647 
648 	if (tun && !tfile->detached) {
649 		u16 index = tfile->queue_index;
650 		BUG_ON(index >= tun->numqueues);
651 
652 		rcu_assign_pointer(tun->tfiles[index],
653 				   tun->tfiles[tun->numqueues - 1]);
654 		ntfile = rtnl_dereference(tun->tfiles[index]);
655 		ntfile->queue_index = index;
656 		ntfile->xdp_rxq.queue_index = index;
657 		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
658 				   NULL);
659 
660 		--tun->numqueues;
661 		if (clean) {
662 			RCU_INIT_POINTER(tfile->tun, NULL);
663 			sock_put(&tfile->sk);
664 		} else {
665 			tun_disable_queue(tun, tfile);
666 			tun_napi_disable(tfile);
667 		}
668 
669 		synchronize_net();
670 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
671 		/* Drop read queue */
672 		tun_queue_purge(tfile);
673 		tun_set_real_num_queues(tun);
674 	} else if (tfile->detached && clean) {
675 		tun = tun_enable_queue(tfile);
676 		sock_put(&tfile->sk);
677 	}
678 
679 	if (clean) {
680 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
681 			netif_carrier_off(tun->dev);
682 
683 			if (!(tun->flags & IFF_PERSIST) &&
684 			    tun->dev->reg_state == NETREG_REGISTERED)
685 				unregister_netdevice(tun->dev);
686 		}
687 		if (tun)
688 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
689 		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
690 	}
691 }
692 
693 static void tun_detach(struct tun_file *tfile, bool clean)
694 {
695 	struct tun_struct *tun;
696 	struct net_device *dev;
697 
698 	rtnl_lock();
699 	tun = rtnl_dereference(tfile->tun);
700 	dev = tun ? tun->dev : NULL;
701 	__tun_detach(tfile, clean);
702 	if (dev)
703 		netdev_state_change(dev);
704 	rtnl_unlock();
705 
706 	if (clean)
707 		sock_put(&tfile->sk);
708 }
709 
710 static void tun_detach_all(struct net_device *dev)
711 {
712 	struct tun_struct *tun = netdev_priv(dev);
713 	struct tun_file *tfile, *tmp;
714 	int i, n = tun->numqueues;
715 
716 	for (i = 0; i < n; i++) {
717 		tfile = rtnl_dereference(tun->tfiles[i]);
718 		BUG_ON(!tfile);
719 		tun_napi_disable(tfile);
720 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
721 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
722 		RCU_INIT_POINTER(tfile->tun, NULL);
723 		--tun->numqueues;
724 	}
725 	list_for_each_entry(tfile, &tun->disabled, next) {
726 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
727 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
728 		RCU_INIT_POINTER(tfile->tun, NULL);
729 	}
730 	BUG_ON(tun->numqueues != 0);
731 
732 	synchronize_net();
733 	for (i = 0; i < n; i++) {
734 		tfile = rtnl_dereference(tun->tfiles[i]);
735 		tun_napi_del(tfile);
736 		/* Drop read queue */
737 		tun_queue_purge(tfile);
738 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
739 		sock_put(&tfile->sk);
740 	}
741 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
742 		tun_napi_del(tfile);
743 		tun_enable_queue(tfile);
744 		tun_queue_purge(tfile);
745 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
746 		sock_put(&tfile->sk);
747 	}
748 	BUG_ON(tun->numdisabled != 0);
749 
750 	if (tun->flags & IFF_PERSIST)
751 		module_put(THIS_MODULE);
752 }
753 
754 static int tun_attach(struct tun_struct *tun, struct file *file,
755 		      bool skip_filter, bool napi, bool napi_frags,
756 		      bool publish_tun)
757 {
758 	struct tun_file *tfile = file->private_data;
759 	struct net_device *dev = tun->dev;
760 	int err;
761 
762 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
763 	if (err < 0)
764 		goto out;
765 
766 	err = -EINVAL;
767 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
768 		goto out;
769 
770 	err = -EBUSY;
771 	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
772 		goto out;
773 
774 	err = -E2BIG;
775 	if (!tfile->detached &&
776 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
777 		goto out;
778 
779 	err = 0;
780 
781 	/* Re-attach the filter to the persistent device */
782 	if (!skip_filter && (tun->filter_attached == true)) {
783 		lock_sock(tfile->socket.sk);
784 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
785 		release_sock(tfile->socket.sk);
786 		if (!err)
787 			goto out;
788 	}
789 
790 	if (!tfile->detached &&
791 	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
792 			    GFP_KERNEL, tun_ptr_free)) {
793 		err = -ENOMEM;
794 		goto out;
795 	}
796 
797 	tfile->queue_index = tun->numqueues;
798 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
799 
800 	if (tfile->detached) {
801 		/* Re-attach detached tfile, updating XDP queue_index */
802 		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
803 
804 		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
805 			tfile->xdp_rxq.queue_index = tfile->queue_index;
806 	} else {
807 		/* Set up XDP RX-queue info for the new tfile being attached */
808 		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
809 				       tun->dev, tfile->queue_index, 0);
810 		if (err < 0)
811 			goto out;
812 		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
813 						 MEM_TYPE_PAGE_SHARED, NULL);
814 		if (err < 0) {
815 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
816 			goto out;
817 		}
818 		err = 0;
819 	}
820 
821 	if (tfile->detached) {
822 		tun_enable_queue(tfile);
823 		tun_napi_enable(tfile);
824 	} else {
825 		sock_hold(&tfile->sk);
826 		tun_napi_init(tun, tfile, napi, napi_frags);
827 	}
828 
829 	if (rtnl_dereference(tun->xdp_prog))
830 		sock_set_flag(&tfile->sk, SOCK_XDP);
831 
832 	/* device is allowed to go away first, so no need to hold extra
833 	 * refcnt.
834 	 */
835 
836 	/* Publish tfile->tun and tun->tfiles only after we've fully
837 	 * initialized tfile; otherwise we risk using half-initialized
838 	 * object.
839 	 */
840 	if (publish_tun)
841 		rcu_assign_pointer(tfile->tun, tun);
842 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
843 	tun->numqueues++;
844 	tun_set_real_num_queues(tun);
845 out:
846 	return err;
847 }
848 
849 static struct tun_struct *tun_get(struct tun_file *tfile)
850 {
851 	struct tun_struct *tun;
852 
853 	rcu_read_lock();
854 	tun = rcu_dereference(tfile->tun);
855 	if (tun)
856 		dev_hold(tun->dev);
857 	rcu_read_unlock();
858 
859 	return tun;
860 }
861 
862 static void tun_put(struct tun_struct *tun)
863 {
864 	dev_put(tun->dev);
865 }
866 
867 /* TAP filtering */
868 static void addr_hash_set(u32 *mask, const u8 *addr)
869 {
870 	int n = ether_crc(ETH_ALEN, addr) >> 26;
871 	mask[n >> 5] |= (1 << (n & 31));
872 }
873 
874 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
875 {
876 	int n = ether_crc(ETH_ALEN, addr) >> 26;
877 	return mask[n >> 5] & (1 << (n & 31));
878 }
879 
880 static int update_filter(struct tap_filter *filter, void __user *arg)
881 {
882 	struct { u8 u[ETH_ALEN]; } *addr;
883 	struct tun_filter uf;
884 	int err, alen, n, nexact;
885 
886 	if (copy_from_user(&uf, arg, sizeof(uf)))
887 		return -EFAULT;
888 
889 	if (!uf.count) {
890 		/* Disabled */
891 		filter->count = 0;
892 		return 0;
893 	}
894 
895 	alen = ETH_ALEN * uf.count;
896 	addr = memdup_user(arg + sizeof(uf), alen);
897 	if (IS_ERR(addr))
898 		return PTR_ERR(addr);
899 
900 	/* The filter is updated without holding any locks, which is
901 	 * perfectly safe: we disable it first, and in the worst
902 	 * case we'll accept a few undesired packets. */
903 	filter->count = 0;
904 	wmb();
905 
906 	/* Use first set of addresses as an exact filter */
907 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
908 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
909 
910 	nexact = n;
911 
912 	/* Remaining multicast addresses are hashed; a unicast
913 	 * address among them leaves the filter disabled. */
914 	memset(filter->mask, 0, sizeof(filter->mask));
915 	for (; n < uf.count; n++) {
916 		if (!is_multicast_ether_addr(addr[n].u)) {
917 			err = 0; /* no filter */
918 			goto free_addr;
919 		}
920 		addr_hash_set(filter->mask, addr[n].u);
921 	}
922 
923 	/* For ALLMULTI just set the mask to all ones.
924 	 * This overrides the mask populated above. */
925 	if ((uf.flags & TUN_FLT_ALLMULTI))
926 		memset(filter->mask, ~0, sizeof(filter->mask));
927 
928 	/* Now enable the filter */
929 	wmb();
930 	filter->count = nexact;
931 
932 	/* Return the number of exact filters */
933 	err = nexact;
934 free_addr:
935 	kfree(addr);
936 	return err;
937 }
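
/* Illustrative userspace sketch (not part of this driver): programming
 * the TX filter above with one exact address while also accepting all
 * multicast traffic. The address array follows the header in memory:
 *
 *	char buf[sizeof(struct tun_filter) + ETH_ALEN];
 *	struct tun_filter *uf = (struct tun_filter *)buf;
 *
 *	uf->flags = TUN_FLT_ALLMULTI;
 *	uf->count = 1;
 *	memcpy(uf->addr, "\x02\x00\x00\x00\x00\x01", ETH_ALEN);
 *	ioctl(fd, TUNSETTXFILTER, uf);
 */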
938 
939 /* Returns: 0 - drop, !=0 - accept */
940 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
941 {
942 	/* Cannot use eth_hdr(skb) here because the mac header offset is
943 	 * not set correctly at this point. */
944 	struct ethhdr *eh = (struct ethhdr *) skb->data;
945 	int i;
946 
947 	/* Exact match */
948 	for (i = 0; i < filter->count; i++)
949 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
950 			return 1;
951 
952 	/* Inexact match (multicast only) */
953 	if (is_multicast_ether_addr(eh->h_dest))
954 		return addr_hash_test(filter->mask, eh->h_dest);
955 
956 	return 0;
957 }
958 
959 /*
960  * Checks whether the packet is accepted or not.
961  * Returns: 0 - drop, !=0 - accept
962  */
963 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
964 {
965 	if (!filter->count)
966 		return 1;
967 
968 	return run_filter(filter, skb);
969 }
970 
971 /* Network device part of the driver */
972 
973 static const struct ethtool_ops tun_ethtool_ops;
974 
975 static int tun_net_init(struct net_device *dev)
976 {
977 	struct tun_struct *tun = netdev_priv(dev);
978 	struct ifreq *ifr = tun->ifr;
979 	int err;
980 
981 	spin_lock_init(&tun->lock);
982 
983 	err = security_tun_dev_alloc_security(&tun->security);
984 	if (err < 0)
985 		return err;
986 
987 	tun_flow_init(tun);
988 
989 	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
990 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
991 			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
992 			   NETIF_F_HW_VLAN_STAG_TX;
993 	dev->features = dev->hw_features | NETIF_F_LLTX;
994 	dev->vlan_features = dev->features &
995 			     ~(NETIF_F_HW_VLAN_CTAG_TX |
996 			       NETIF_F_HW_VLAN_STAG_TX);
997 
998 	tun->flags = (tun->flags & ~TUN_FEATURES) |
999 		      (ifr->ifr_flags & TUN_FEATURES);
1000 
1001 	INIT_LIST_HEAD(&tun->disabled);
1002 	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1003 			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
1004 	if (err < 0) {
1005 		tun_flow_uninit(tun);
1006 		security_tun_dev_free_security(tun->security);
1007 		return err;
1008 	}
1009 	return 0;
1010 }
1011 
1012 /* Net device detach from fd. */
1013 static void tun_net_uninit(struct net_device *dev)
1014 {
1015 	tun_detach_all(dev);
1016 }
1017 
1018 /* Net device open. */
1019 static int tun_net_open(struct net_device *dev)
1020 {
1021 	netif_tx_start_all_queues(dev);
1022 
1023 	return 0;
1024 }
1025 
1026 /* Net device close. */
1027 static int tun_net_close(struct net_device *dev)
1028 {
1029 	netif_tx_stop_all_queues(dev);
1030 	return 0;
1031 }
1032 
1033 /* Net device start xmit */
1034 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1035 {
1036 #ifdef CONFIG_RPS
1037 	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1038 		/* Select queue was not called for the skbuff, so we extract the
1039 		 * RPS hash and save it into the flow_table here.
1040 		 */
1041 		struct tun_flow_entry *e;
1042 		__u32 rxhash;
1043 
1044 		rxhash = __skb_get_hash_symmetric(skb);
1045 		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1046 		if (e)
1047 			tun_flow_save_rps_rxhash(e, rxhash);
1048 	}
1049 #endif
1050 }
1051 
1052 static unsigned int run_ebpf_filter(struct tun_struct *tun,
1053 				    struct sk_buff *skb,
1054 				    int len)
1055 {
1056 	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1057 
1058 	if (prog)
1059 		len = bpf_prog_run_clear_cb(prog->prog, skb);
1060 
1061 	return len;
1062 }
1063 
1064 /* Net device start xmit */
1065 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1066 {
1067 	struct tun_struct *tun = netdev_priv(dev);
1068 	enum skb_drop_reason drop_reason;
1069 	int txq = skb->queue_mapping;
1070 	struct netdev_queue *queue;
1071 	struct tun_file *tfile;
1072 	int len = skb->len;
1073 
1074 	rcu_read_lock();
1075 	tfile = rcu_dereference(tun->tfiles[txq]);
1076 
1077 	/* Drop packet if interface is not attached */
1078 	if (!tfile) {
1079 		drop_reason = SKB_DROP_REASON_DEV_READY;
1080 		goto drop;
1081 	}
1082 
1083 	if (!rcu_dereference(tun->steering_prog))
1084 		tun_automq_xmit(tun, skb);
1085 
1086 	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1087 
1088 	/* Drop if the filter does not like it.
1089 	 * This is a noop if the filter is disabled.
1090 	 * Filter can be enabled only for the TAP devices. */
1091 	if (!check_filter(&tun->txflt, skb)) {
1092 		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
1093 		goto drop;
1094 	}
1095 
1096 	if (tfile->socket.sk->sk_filter &&
1097 	    sk_filter(tfile->socket.sk, skb)) {
1098 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1099 		goto drop;
1100 	}
1101 
1102 	len = run_ebpf_filter(tun, skb, len);
1103 	if (len == 0) {
1104 		drop_reason = SKB_DROP_REASON_TAP_FILTER;
1105 		goto drop;
1106 	}
1107 
1108 	if (pskb_trim(skb, len)) {
1109 		drop_reason = SKB_DROP_REASON_NOMEM;
1110 		goto drop;
1111 	}
1112 
1113 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
1114 		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1115 		goto drop;
1116 	}
1117 
1118 	skb_tx_timestamp(skb);
1119 
1120 	/* Orphan the skb - required as we might hang on to it
1121 	 * for an indefinite time.
1122 	 */
1123 	skb_orphan(skb);
1124 
1125 	nf_reset_ct(skb);
1126 
1127 	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
1128 		drop_reason = SKB_DROP_REASON_FULL_RING;
1129 		goto drop;
1130 	}
1131 
1132 	/* NETIF_F_LLTX requires us to do our own update of trans_start */
1133 	queue = netdev_get_tx_queue(dev, txq);
1134 	txq_trans_cond_update(queue);
1135 
1136 	/* Notify and wake up reader process */
1137 	if (tfile->flags & TUN_FASYNC)
1138 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1139 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1140 
1141 	rcu_read_unlock();
1142 	return NETDEV_TX_OK;
1143 
1144 drop:
1145 	dev_core_stats_tx_dropped_inc(dev);
1146 	skb_tx_error(skb);
1147 	kfree_skb_reason(skb, drop_reason);
1148 	rcu_read_unlock();
1149 	return NET_XMIT_DROP;
1150 }
1151 
1152 static void tun_net_mclist(struct net_device *dev)
1153 {
1154 	/*
1155 	 * This callback is supposed to deal with the mc filter in the
1156 	 * _rx_ path and has nothing to do with the _tx_ path.
1157 	 * In the rx path we always accept everything userspace gives us.
1158 	 */
1159 }
1160 
1161 static netdev_features_t tun_net_fix_features(struct net_device *dev,
1162 	netdev_features_t features)
1163 {
1164 	struct tun_struct *tun = netdev_priv(dev);
1165 
1166 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1167 }
1168 
1169 static void tun_set_headroom(struct net_device *dev, int new_hr)
1170 {
1171 	struct tun_struct *tun = netdev_priv(dev);
1172 
1173 	if (new_hr < NET_SKB_PAD)
1174 		new_hr = NET_SKB_PAD;
1175 
1176 	tun->align = new_hr;
1177 }
1178 
1179 static void
1180 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1181 {
1182 	struct tun_struct *tun = netdev_priv(dev);
1183 
1184 	dev_get_tstats64(dev, stats);
1185 
1186 	stats->rx_frame_errors +=
1187 		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
1188 }
1189 
1190 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1191 		       struct netlink_ext_ack *extack)
1192 {
1193 	struct tun_struct *tun = netdev_priv(dev);
1194 	struct tun_file *tfile;
1195 	struct bpf_prog *old_prog;
1196 	int i;
1197 
1198 	old_prog = rtnl_dereference(tun->xdp_prog);
1199 	rcu_assign_pointer(tun->xdp_prog, prog);
1200 	if (old_prog)
1201 		bpf_prog_put(old_prog);
1202 
1203 	for (i = 0; i < tun->numqueues; i++) {
1204 		tfile = rtnl_dereference(tun->tfiles[i]);
1205 		if (prog)
1206 			sock_set_flag(&tfile->sk, SOCK_XDP);
1207 		else
1208 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1209 	}
1210 	list_for_each_entry(tfile, &tun->disabled, next) {
1211 		if (prog)
1212 			sock_set_flag(&tfile->sk, SOCK_XDP);
1213 		else
1214 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1221 {
1222 	switch (xdp->command) {
1223 	case XDP_SETUP_PROG:
1224 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1225 	default:
1226 		return -EINVAL;
1227 	}
1228 }
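
/* Illustrative usage (not part of this driver): XDP_SETUP_PROG is
 * normally reached via the netlink path, e.g. with iproute2 on a tap
 * device:
 *
 *	ip link set dev tap0 xdp obj xdp_prog.o sec xdp
 */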
1229 
1230 static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1231 {
1232 	if (new_carrier) {
1233 		struct tun_struct *tun = netdev_priv(dev);
1234 
1235 		if (!tun->numqueues)
1236 			return -EPERM;
1237 
1238 		netif_carrier_on(dev);
1239 	} else {
1240 		netif_carrier_off(dev);
1241 	}
1242 	return 0;
1243 }
1244 
1245 static const struct net_device_ops tun_netdev_ops = {
1246 	.ndo_init		= tun_net_init,
1247 	.ndo_uninit		= tun_net_uninit,
1248 	.ndo_open		= tun_net_open,
1249 	.ndo_stop		= tun_net_close,
1250 	.ndo_start_xmit		= tun_net_xmit,
1251 	.ndo_fix_features	= tun_net_fix_features,
1252 	.ndo_select_queue	= tun_select_queue,
1253 	.ndo_set_rx_headroom	= tun_set_headroom,
1254 	.ndo_get_stats64	= tun_net_get_stats64,
1255 	.ndo_change_carrier	= tun_net_change_carrier,
1256 };
1257 
1258 static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1259 {
1260 	/* Notify and wake up reader process */
1261 	if (tfile->flags & TUN_FASYNC)
1262 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1263 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1264 }
1265 
1266 static int tun_xdp_xmit(struct net_device *dev, int n,
1267 			struct xdp_frame **frames, u32 flags)
1268 {
1269 	struct tun_struct *tun = netdev_priv(dev);
1270 	struct tun_file *tfile;
1271 	u32 numqueues;
1272 	int nxmit = 0;
1273 	int i;
1274 
1275 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1276 		return -EINVAL;
1277 
1278 	rcu_read_lock();
1279 
1280 resample:
1281 	numqueues = READ_ONCE(tun->numqueues);
1282 	if (!numqueues) {
1283 		rcu_read_unlock();
1284 		return -ENXIO; /* Caller will free/return all frames */
1285 	}
1286 
1287 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1288 					    numqueues]);
1289 	if (unlikely(!tfile))
1290 		goto resample;
1291 
1292 	spin_lock(&tfile->tx_ring.producer_lock);
1293 	for (i = 0; i < n; i++) {
1294 		struct xdp_frame *xdp = frames[i];
1295 		/* Encode the XDP flag into the lowest bit so the consumer can
1296 		 * distinguish an XDP frame from an sk_buff.
1297 		 */
1298 		void *frame = tun_xdp_to_ptr(xdp);
1299 
1300 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1301 			dev_core_stats_tx_dropped_inc(dev);
1302 			break;
1303 		}
1304 		nxmit++;
1305 	}
1306 	spin_unlock(&tfile->tx_ring.producer_lock);
1307 
1308 	if (flags & XDP_XMIT_FLUSH)
1309 		__tun_xdp_flush_tfile(tfile);
1310 
1311 	rcu_read_unlock();
1312 	return nxmit;
1313 }
1314 
1315 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1316 {
1317 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1318 	int nxmit;
1319 
1320 	if (unlikely(!frame))
1321 		return -EOVERFLOW;
1322 
1323 	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1324 	if (!nxmit)
1325 		xdp_return_frame_rx_napi(frame);
1326 	return nxmit;
1327 }
1328 
1329 static const struct net_device_ops tap_netdev_ops = {
1330 	.ndo_init		= tun_net_init,
1331 	.ndo_uninit		= tun_net_uninit,
1332 	.ndo_open		= tun_net_open,
1333 	.ndo_stop		= tun_net_close,
1334 	.ndo_start_xmit		= tun_net_xmit,
1335 	.ndo_fix_features	= tun_net_fix_features,
1336 	.ndo_set_rx_mode	= tun_net_mclist,
1337 	.ndo_set_mac_address	= eth_mac_addr,
1338 	.ndo_validate_addr	= eth_validate_addr,
1339 	.ndo_select_queue	= tun_select_queue,
1340 	.ndo_features_check	= passthru_features_check,
1341 	.ndo_set_rx_headroom	= tun_set_headroom,
1342 	.ndo_bpf		= tun_xdp,
1343 	.ndo_xdp_xmit		= tun_xdp_xmit,
1344 	.ndo_change_carrier	= tun_net_change_carrier,
1345 };
1346 
1347 static void tun_flow_init(struct tun_struct *tun)
1348 {
1349 	int i;
1350 
1351 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1352 		INIT_HLIST_HEAD(&tun->flows[i]);
1353 
1354 	tun->ageing_time = TUN_FLOW_EXPIRE;
1355 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1356 	mod_timer(&tun->flow_gc_timer,
1357 		  round_jiffies_up(jiffies + tun->ageing_time));
1358 }
1359 
1360 static void tun_flow_uninit(struct tun_struct *tun)
1361 {
1362 	del_timer_sync(&tun->flow_gc_timer);
1363 	tun_flow_flush(tun);
1364 }
1365 
1366 #define MIN_MTU 68
1367 #define MAX_MTU 65535
1368 
1369 /* Initialize net device. */
1370 static void tun_net_initialize(struct net_device *dev)
1371 {
1372 	struct tun_struct *tun = netdev_priv(dev);
1373 
1374 	switch (tun->flags & TUN_TYPE_MASK) {
1375 	case IFF_TUN:
1376 		dev->netdev_ops = &tun_netdev_ops;
1377 		dev->header_ops = &ip_tunnel_header_ops;
1378 
1379 		/* Point-to-Point TUN Device */
1380 		dev->hard_header_len = 0;
1381 		dev->addr_len = 0;
1382 		dev->mtu = 1500;
1383 
1384 		/* Zero header length */
1385 		dev->type = ARPHRD_NONE;
1386 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1387 		break;
1388 
1389 	case IFF_TAP:
1390 		dev->netdev_ops = &tap_netdev_ops;
1391 		/* Ethernet TAP Device */
1392 		ether_setup(dev);
1393 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1394 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1395 
1396 		eth_hw_addr_random(dev);
1397 
1398 		/* Currently tun does not support XDP, only tap does. */
1399 		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
1400 				    NETDEV_XDP_ACT_REDIRECT |
1401 				    NETDEV_XDP_ACT_NDO_XMIT;
1402 
1403 		break;
1404 	}
1405 
1406 	dev->min_mtu = MIN_MTU;
1407 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1408 }
1409 
1410 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1411 {
1412 	struct sock *sk = tfile->socket.sk;
1413 
1414 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1415 }
1416 
1417 /* Character device part */
1418 
1419 /* Poll */
1420 static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1421 {
1422 	struct tun_file *tfile = file->private_data;
1423 	struct tun_struct *tun = tun_get(tfile);
1424 	struct sock *sk;
1425 	__poll_t mask = 0;
1426 
1427 	if (!tun)
1428 		return EPOLLERR;
1429 
1430 	sk = tfile->socket.sk;
1431 
1432 	poll_wait(file, sk_sleep(sk), wait);
1433 
1434 	if (!ptr_ring_empty(&tfile->tx_ring))
1435 		mask |= EPOLLIN | EPOLLRDNORM;
1436 
1437 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1438 	 * guarantee EPOLLOUT to be raised by either here or
1439 	 * tun_sock_write_space(). Then process could get notification
1440 	 * after it writes to a down device and meets -EIO.
1441 	 */
1442 	if (tun_sock_writeable(tun, tfile) ||
1443 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1444 	     tun_sock_writeable(tun, tfile)))
1445 		mask |= EPOLLOUT | EPOLLWRNORM;
1446 
1447 	if (tun->dev->reg_state != NETREG_REGISTERED)
1448 		mask = EPOLLERR;
1449 
1450 	tun_put(tun);
1451 	return mask;
1452 }
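
/* Illustrative userspace sketch (not part of this driver): the poll
 * mask above makes a tun fd usable with standard event loops:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 */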
1453 
1454 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1455 					    size_t len,
1456 					    const struct iov_iter *it)
1457 {
1458 	struct sk_buff *skb;
1459 	size_t linear;
1460 	int err;
1461 	int i;
1462 
1463 	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
1464 	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
1465 		return ERR_PTR(-EMSGSIZE);
1466 
1467 	local_bh_disable();
1468 	skb = napi_get_frags(&tfile->napi);
1469 	local_bh_enable();
1470 	if (!skb)
1471 		return ERR_PTR(-ENOMEM);
1472 
1473 	linear = iov_iter_single_seg_count(it);
1474 	err = __skb_grow(skb, linear);
1475 	if (err)
1476 		goto free;
1477 
1478 	skb->len = len;
1479 	skb->data_len = len - linear;
1480 	skb->truesize += skb->data_len;
1481 
1482 	for (i = 1; i < it->nr_segs; i++) {
1483 		const struct iovec *iov = iter_iov(it);
1484 		size_t fragsz = iov->iov_len;
1485 		struct page *page;
1486 		void *frag;
1487 
1488 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1489 			err = -EINVAL;
1490 			goto free;
1491 		}
1492 		frag = netdev_alloc_frag(fragsz);
1493 		if (!frag) {
1494 			err = -ENOMEM;
1495 			goto free;
1496 		}
1497 		page = virt_to_head_page(frag);
1498 		skb_fill_page_desc(skb, i - 1, page,
1499 				   frag - page_address(page), fragsz);
1500 	}
1501 
1502 	return skb;
1503 free:
1504 	/* frees skb and all frags allocated with napi_alloc_frag() */
1505 	napi_free_frags(&tfile->napi);
1506 	return ERR_PTR(err);
1507 }
1508 
1509 /* prepad is the amount to reserve at front.  len is length after that.
1510  * linear is a hint as to how much to copy (usually headers). */
1511 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1512 				     size_t prepad, size_t len,
1513 				     size_t linear, int noblock)
1514 {
1515 	struct sock *sk = tfile->socket.sk;
1516 	struct sk_buff *skb;
1517 	int err;
1518 
1519 	/* Under a page?  Don't bother with paged skb. */
1520 	if (prepad + len < PAGE_SIZE)
1521 		linear = len;
1522 
1523 	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
1524 		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
1525 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1526 				   &err, PAGE_ALLOC_COSTLY_ORDER);
1527 	if (!skb)
1528 		return ERR_PTR(err);
1529 
1530 	skb_reserve(skb, prepad);
1531 	skb_put(skb, linear);
1532 	skb->data_len = len - linear;
1533 	skb->len += len - linear;
1534 
1535 	return skb;
1536 }
1537 
1538 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1539 			   struct sk_buff *skb, int more)
1540 {
1541 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1542 	struct sk_buff_head process_queue;
1543 	u32 rx_batched = tun->rx_batched;
1544 	bool rcv = false;
1545 
1546 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1547 		local_bh_disable();
1548 		skb_record_rx_queue(skb, tfile->queue_index);
1549 		netif_receive_skb(skb);
1550 		local_bh_enable();
1551 		return;
1552 	}
1553 
1554 	spin_lock(&queue->lock);
1555 	if (!more || skb_queue_len(queue) == rx_batched) {
1556 		__skb_queue_head_init(&process_queue);
1557 		skb_queue_splice_tail_init(queue, &process_queue);
1558 		rcv = true;
1559 	} else {
1560 		__skb_queue_tail(queue, skb);
1561 	}
1562 	spin_unlock(&queue->lock);
1563 
1564 	if (rcv) {
1565 		struct sk_buff *nskb;
1566 
1567 		local_bh_disable();
1568 		while ((nskb = __skb_dequeue(&process_queue))) {
1569 			skb_record_rx_queue(nskb, tfile->queue_index);
1570 			netif_receive_skb(nskb);
1571 		}
1572 		skb_record_rx_queue(skb, tfile->queue_index);
1573 		netif_receive_skb(skb);
1574 		local_bh_enable();
1575 	}
1576 }
1577 
1578 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1579 			      int len, int noblock, bool zerocopy)
1580 {
1581 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1582 		return false;
1583 
1584 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1585 		return false;
1586 
1587 	if (!noblock)
1588 		return false;
1589 
1590 	if (zerocopy)
1591 		return false;
1592 
1593 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
1594 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1595 		return false;
1596 
1597 	return true;
1598 }
1599 
1600 static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1601 				       struct page_frag *alloc_frag, char *buf,
1602 				       int buflen, int len, int pad)
1603 {
1604 	struct sk_buff *skb = build_skb(buf, buflen);
1605 
1606 	if (!skb)
1607 		return ERR_PTR(-ENOMEM);
1608 
1609 	skb_reserve(skb, pad);
1610 	skb_put(skb, len);
1611 	skb_set_owner_w(skb, tfile->socket.sk);
1612 
1613 	get_page(alloc_frag->page);
1614 	alloc_frag->offset += buflen;
1615 
1616 	return skb;
1617 }
1618 
1619 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1620 		       struct xdp_buff *xdp, u32 act)
1621 {
1622 	int err;
1623 
1624 	switch (act) {
1625 	case XDP_REDIRECT:
1626 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1627 		if (err) {
1628 			dev_core_stats_rx_dropped_inc(tun->dev);
1629 			return err;
1630 		}
1631 		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1632 		break;
1633 	case XDP_TX:
1634 		err = tun_xdp_tx(tun->dev, xdp);
1635 		if (err < 0) {
1636 			dev_core_stats_rx_dropped_inc(tun->dev);
1637 			return err;
1638 		}
1639 		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1640 		break;
1641 	case XDP_PASS:
1642 		break;
1643 	default:
1644 		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1645 		fallthrough;
1646 	case XDP_ABORTED:
1647 		trace_xdp_exception(tun->dev, xdp_prog, act);
1648 		fallthrough;
1649 	case XDP_DROP:
1650 		dev_core_stats_rx_dropped_inc(tun->dev);
1651 		break;
1652 	}
1653 
1654 	return act;
1655 }
1656 
1657 static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1658 				     struct tun_file *tfile,
1659 				     struct iov_iter *from,
1660 				     struct virtio_net_hdr *hdr,
1661 				     int len, int *skb_xdp)
1662 {
1663 	struct page_frag *alloc_frag = &current->task_frag;
1664 	struct bpf_prog *xdp_prog;
1665 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1666 	char *buf;
1667 	size_t copied;
1668 	int pad = TUN_RX_PAD;
1669 	int err = 0;
1670 
1671 	rcu_read_lock();
1672 	xdp_prog = rcu_dereference(tun->xdp_prog);
1673 	if (xdp_prog)
1674 		pad += XDP_PACKET_HEADROOM;
1675 	buflen += SKB_DATA_ALIGN(len + pad);
1676 	rcu_read_unlock();
1677 
1678 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1679 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1680 		return ERR_PTR(-ENOMEM);
1681 
1682 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1683 	copied = copy_page_from_iter(alloc_frag->page,
1684 				     alloc_frag->offset + pad,
1685 				     len, from);
1686 	if (copied != len)
1687 		return ERR_PTR(-EFAULT);
1688 
1689 	/* There's a small window in which an XDP program may be set after the
1690 	 * check of xdp_prog above; this should be rare, and for simplicity we
1691 	 * then fall back to doing XDP on the skb, as the headroom may be too small.
1692 	 */
1693 	if (hdr->gso_type || !xdp_prog) {
1694 		*skb_xdp = 1;
1695 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1696 				       pad);
1697 	}
1698 
1699 	*skb_xdp = 0;
1700 
1701 	local_bh_disable();
1702 	rcu_read_lock();
1703 	xdp_prog = rcu_dereference(tun->xdp_prog);
1704 	if (xdp_prog) {
1705 		struct xdp_buff xdp;
1706 		u32 act;
1707 
1708 		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
1709 		xdp_prepare_buff(&xdp, buf, pad, len, false);
1710 
1711 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1712 		if (act == XDP_REDIRECT || act == XDP_TX) {
1713 			get_page(alloc_frag->page);
1714 			alloc_frag->offset += buflen;
1715 		}
1716 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1717 		if (err < 0) {
1718 			if (act == XDP_REDIRECT || act == XDP_TX)
1719 				put_page(alloc_frag->page);
1720 			goto out;
1721 		}
1722 
1723 		if (err == XDP_REDIRECT)
1724 			xdp_do_flush();
1725 		if (err != XDP_PASS)
1726 			goto out;
1727 
1728 		pad = xdp.data - xdp.data_hard_start;
1729 		len = xdp.data_end - xdp.data;
1730 	}
1731 	rcu_read_unlock();
1732 	local_bh_enable();
1733 
1734 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1735 
1736 out:
1737 	rcu_read_unlock();
1738 	local_bh_enable();
1739 	return NULL;
1740 }
1741 
1742 /* Get packet from user space buffer */
1743 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1744 			    void *msg_control, struct iov_iter *from,
1745 			    int noblock, bool more)
1746 {
1747 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1748 	struct sk_buff *skb;
1749 	size_t total_len = iov_iter_count(from);
1750 	size_t len = total_len, align = tun->align, linear;
1751 	struct virtio_net_hdr gso = { 0 };
1752 	int good_linear;
1753 	int copylen;
1754 	bool zerocopy = false;
1755 	int err;
1756 	u32 rxhash = 0;
1757 	int skb_xdp = 1;
1758 	bool frags = tun_napi_frags_enabled(tfile);
1759 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1760 
1761 	if (!(tun->flags & IFF_NO_PI)) {
1762 		if (len < sizeof(pi))
1763 			return -EINVAL;
1764 		len -= sizeof(pi);
1765 
1766 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1767 			return -EFAULT;
1768 	}
1769 
1770 	if (tun->flags & IFF_VNET_HDR) {
1771 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1772 
1773 		if (len < vnet_hdr_sz)
1774 			return -EINVAL;
1775 		len -= vnet_hdr_sz;
1776 
1777 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1778 			return -EFAULT;
1779 
1780 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1781 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1782 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1783 
1784 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1785 			return -EINVAL;
1786 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1787 	}
1788 
1789 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1790 		align += NET_IP_ALIGN;
1791 		if (unlikely(len < ETH_HLEN ||
1792 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1793 			return -EINVAL;
1794 	}
1795 
1796 	good_linear = SKB_MAX_HEAD(align);
1797 
1798 	if (msg_control) {
1799 		struct iov_iter i = *from;
1800 
1801 		/* Only the headers (GOODCOPY_LEN bytes by default) are copied
1802 		 * into the skb, leaving room to expand the skb head if needed.
1803 		 * The rest of the buffer is mapped from userspace.
1804 		 */
1805 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1806 		if (copylen > good_linear)
1807 			copylen = good_linear;
1808 		linear = copylen;
1809 		iov_iter_advance(&i, copylen);
1810 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1811 			zerocopy = true;
1812 	}
1813 
1814 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1815 		/* Packets that are not easy to process here (e.g. GSO or
1816 		 * jumbo packets) are handled with the generic XDP routine
1817 		 * after the skb has been created.
1818 		 */
1819 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1820 		err = PTR_ERR_OR_ZERO(skb);
1821 		if (err)
1822 			goto drop;
1823 		if (!skb)
1824 			return total_len;
1825 	} else {
1826 		if (!zerocopy) {
1827 			copylen = len;
1828 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1829 				linear = good_linear;
1830 			else
1831 				linear = tun16_to_cpu(tun, gso.hdr_len);
1832 		}
1833 
1834 		if (frags) {
1835 			mutex_lock(&tfile->napi_mutex);
1836 			skb = tun_napi_alloc_frags(tfile, copylen, from);
1837 			/* tun_napi_alloc_frags() enforces a layout for the skb.
1838 			 * If zerocopy is enabled, then this layout will be
1839 			 * overwritten by zerocopy_sg_from_iter().
1840 			 */
1841 			zerocopy = false;
1842 		} else {
1843 			if (!linear)
1844 				linear = min_t(size_t, good_linear, copylen);
1845 
1846 			skb = tun_alloc_skb(tfile, align, copylen, linear,
1847 					    noblock);
1848 		}
1849 
1850 		err = PTR_ERR_OR_ZERO(skb);
1851 		if (err)
1852 			goto drop;
1853 
1854 		if (zerocopy)
1855 			err = zerocopy_sg_from_iter(skb, from);
1856 		else
1857 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1858 
1859 		if (err) {
1860 			err = -EFAULT;
1861 			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1862 			goto drop;
1863 		}
1864 	}
1865 
1866 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1867 		atomic_long_inc(&tun->rx_frame_errors);
1868 		err = -EINVAL;
1869 		goto free_skb;
1870 	}
1871 
1872 	switch (tun->flags & TUN_TYPE_MASK) {
1873 	case IFF_TUN:
1874 		if (tun->flags & IFF_NO_PI) {
1875 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1876 
1877 			switch (ip_version) {
1878 			case 4:
1879 				pi.proto = htons(ETH_P_IP);
1880 				break;
1881 			case 6:
1882 				pi.proto = htons(ETH_P_IPV6);
1883 				break;
1884 			default:
1885 				err = -EINVAL;
1886 				goto drop;
1887 			}
1888 		}
1889 
1890 		skb_reset_mac_header(skb);
1891 		skb->protocol = pi.proto;
1892 		skb->dev = tun->dev;
1893 		break;
1894 	case IFF_TAP:
1895 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1896 			err = -ENOMEM;
1897 			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
1898 			goto drop;
1899 		}
1900 		skb->protocol = eth_type_trans(skb, tun->dev);
1901 		break;
1902 	}
1903 
1904 	/* copy skb_ubuf_info for callback when skb has no error */
1905 	if (zerocopy) {
1906 		skb_zcopy_init(skb, msg_control);
1907 	} else if (msg_control) {
1908 		struct ubuf_info *uarg = msg_control;
1909 		uarg->callback(NULL, uarg, false);
1910 	}
1911 
1912 	skb_reset_network_header(skb);
1913 	skb_probe_transport_header(skb);
1914 	skb_record_rx_queue(skb, tfile->queue_index);
1915 
1916 	if (skb_xdp) {
1917 		struct bpf_prog *xdp_prog;
1918 		int ret;
1919 
1920 		local_bh_disable();
1921 		rcu_read_lock();
1922 		xdp_prog = rcu_dereference(tun->xdp_prog);
1923 		if (xdp_prog) {
1924 			ret = do_xdp_generic(xdp_prog, &skb);
1925 			if (ret != XDP_PASS) {
1926 				rcu_read_unlock();
1927 				local_bh_enable();
1928 				goto unlock_frags;
1929 			}
1930 		}
1931 		rcu_read_unlock();
1932 		local_bh_enable();
1933 	}
1934 
1935 	/* Compute the costly rx hash only if needed for flow updates.
1936 	 * There is a very small possibility of out-of-order delivery while
1937 	 * switching queues, which is not worth optimizing for.
1938 	 */
1939 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1940 	    !tfile->detached)
1941 		rxhash = __skb_get_hash_symmetric(skb);
1942 
1943 	rcu_read_lock();
1944 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1945 		err = -EIO;
1946 		rcu_read_unlock();
1947 		drop_reason = SKB_DROP_REASON_DEV_READY;
1948 		goto drop;
1949 	}
1950 
1951 	if (frags) {
1952 		u32 headlen;
1953 
1954 		/* Exercise flow dissector code path. */
1955 		skb_push(skb, ETH_HLEN);
1956 		headlen = eth_get_headlen(tun->dev, skb->data,
1957 					  skb_headlen(skb));
1958 
1959 		if (unlikely(headlen > skb_headlen(skb))) {
1960 			WARN_ON_ONCE(1);
1961 			err = -ENOMEM;
1962 			dev_core_stats_rx_dropped_inc(tun->dev);
1963 napi_busy:
1964 			napi_free_frags(&tfile->napi);
1965 			rcu_read_unlock();
1966 			mutex_unlock(&tfile->napi_mutex);
1967 			return err;
1968 		}
1969 
1970 		if (likely(napi_schedule_prep(&tfile->napi))) {
1971 			local_bh_disable();
1972 			napi_gro_frags(&tfile->napi);
1973 			napi_complete(&tfile->napi);
1974 			local_bh_enable();
1975 		} else {
1976 			err = -EBUSY;
1977 			goto napi_busy;
1978 		}
1979 		mutex_unlock(&tfile->napi_mutex);
1980 	} else if (tfile->napi_enabled) {
1981 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1982 		int queue_len;
1983 
1984 		spin_lock_bh(&queue->lock);
1985 
1986 		if (unlikely(tfile->detached)) {
1987 			spin_unlock_bh(&queue->lock);
1988 			rcu_read_unlock();
1989 			err = -EBUSY;
1990 			goto free_skb;
1991 		}
1992 
1993 		__skb_queue_tail(queue, skb);
1994 		queue_len = skb_queue_len(queue);
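		/* Plain spin_unlock() on purpose: BH stays disabled from the
		 * spin_lock_bh() above, so the NAPI poll scheduled below only
		 * runs at the local_bh_enable() that follows.
		 */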
1995 		spin_unlock(&queue->lock);
1996 
1997 		if (!more || queue_len > NAPI_POLL_WEIGHT)
1998 			napi_schedule(&tfile->napi);
1999 
2000 		local_bh_enable();
2001 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2002 		tun_rx_batched(tun, tfile, skb, more);
2003 	} else {
2004 		netif_rx(skb);
2005 	}
2006 	rcu_read_unlock();
2007 
2008 	preempt_disable();
2009 	dev_sw_netstats_rx_add(tun->dev, len);
2010 	preempt_enable();
2011 
2012 	if (rxhash)
2013 		tun_flow_update(tun, rxhash, tfile);
2014 
2015 	return total_len;
2016 
2017 drop:
2018 	if (err != -EAGAIN)
2019 		dev_core_stats_rx_dropped_inc(tun->dev);
2020 
2021 free_skb:
2022 	if (!IS_ERR_OR_NULL(skb))
2023 		kfree_skb_reason(skb, drop_reason);
2024 
2025 unlock_frags:
2026 	if (frags) {
2027 		tfile->napi.skb = NULL;
2028 		mutex_unlock(&tfile->napi_mutex);
2029 	}
2030 
2031 	return err ?: total_len;
2032 }
2033 
2034 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2035 {
2036 	struct file *file = iocb->ki_filp;
2037 	struct tun_file *tfile = file->private_data;
2038 	struct tun_struct *tun = tun_get(tfile);
2039 	ssize_t result;
2040 	int noblock = 0;
2041 
2042 	if (!tun)
2043 		return -EBADFD;
2044 
2045 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2046 		noblock = 1;
2047 
2048 	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2049 
2050 	tun_put(tun);
2051 	return result;
2052 }
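
/* Illustrative userspace sketch, not driver code: writing one packet to a
 * tun device opened with IFF_TUN | IFF_NO_PI.  Each write() must carry
 * exactly one IP datagram; with IFF_NO_PI the version is sniffed from the
 * first nibble in tun_get_user(), so no struct tun_pi header is prepended.
 *
 *	ssize_t n = write(tun_fd, pkt, pkt_len);	// pkt: one full IP datagram
 *
 *	if (n < 0)
 *		perror("write");	// e.g. EINVAL for a bad version nibble
 */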
2053 
2054 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2055 				struct tun_file *tfile,
2056 				struct xdp_frame *xdp_frame,
2057 				struct iov_iter *iter)
2058 {
2059 	int vnet_hdr_sz = 0;
2060 	size_t size = xdp_frame->len;
2061 	size_t ret;
2062 
2063 	if (tun->flags & IFF_VNET_HDR) {
2064 		struct virtio_net_hdr gso = { 0 };
2065 
2066 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2067 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2068 			return -EINVAL;
2069 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2070 			     sizeof(gso)))
2071 			return -EFAULT;
2072 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2073 	}
2074 
2075 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2076 
2077 	preempt_disable();
2078 	dev_sw_netstats_tx_add(tun->dev, 1, ret);
2079 	preempt_enable();
2080 
2081 	return ret;
2082 }
2083 
2084 /* Put packet to the user space buffer */
2085 static ssize_t tun_put_user(struct tun_struct *tun,
2086 			    struct tun_file *tfile,
2087 			    struct sk_buff *skb,
2088 			    struct iov_iter *iter)
2089 {
2090 	struct tun_pi pi = { 0, skb->protocol };
2091 	ssize_t total;
2092 	int vlan_offset = 0;
2093 	int vlan_hlen = 0;
2094 	int vnet_hdr_sz = 0;
2095 
2096 	if (skb_vlan_tag_present(skb))
2097 		vlan_hlen = VLAN_HLEN;
2098 
2099 	if (tun->flags & IFF_VNET_HDR)
2100 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2101 
2102 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2103 
2104 	if (!(tun->flags & IFF_NO_PI)) {
2105 		if (iov_iter_count(iter) < sizeof(pi))
2106 			return -EINVAL;
2107 
2108 		total += sizeof(pi);
2109 		if (iov_iter_count(iter) < total) {
2110 			/* Packet will be stripped */
2111 			pi.flags |= TUN_PKT_STRIP;
2112 		}
2113 
2114 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2115 			return -EFAULT;
2116 	}
2117 
2118 	if (vnet_hdr_sz) {
2119 		struct virtio_net_hdr gso;
2120 
2121 		if (iov_iter_count(iter) < vnet_hdr_sz)
2122 			return -EINVAL;
2123 
2124 		if (virtio_net_hdr_from_skb(skb, &gso,
2125 					    tun_is_little_endian(tun), true,
2126 					    vlan_hlen)) {
2127 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2128 
2129 			if (net_ratelimit()) {
2130 				netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2131 					   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2132 					   tun16_to_cpu(tun, gso.hdr_len));
2133 				print_hex_dump(KERN_ERR, "tun: ",
2134 					       DUMP_PREFIX_NONE,
2135 					       16, 1, skb->head,
2136 					       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2137 			}
2138 			WARN_ON_ONCE(1);
2139 			return -EINVAL;
2140 		}
2141 
2142 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2143 			return -EFAULT;
2144 
2145 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2146 	}
2147 
2148 	if (vlan_hlen) {
2149 		int ret;
2150 		struct veth veth;
2151 
2152 		veth.h_vlan_proto = skb->vlan_proto;
2153 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2154 
2155 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2156 
2157 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2158 		if (ret || !iov_iter_count(iter))
2159 			goto done;
2160 
2161 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2162 		if (ret != sizeof(veth) || !iov_iter_count(iter))
2163 			goto done;
2164 	}
2165 
2166 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2167 
2168 done:
2169 	/* caller is in process context; disable preemption for per-CPU stats */
2170 	preempt_disable();
2171 	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2172 	preempt_enable();
2173 
2174 	return total;
2175 }
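
/* Illustrative userspace sketch, not driver code: reading one packet from a
 * device created without IFF_NO_PI.  The struct tun_pi header comes first;
 * TUN_PKT_STRIP in pi.flags means the buffer was too small and the frame
 * was truncated (see tun_put_user() above).
 *
 *	struct tun_pi pi;
 *	char buf[sizeof(pi) + 2048];
 *	ssize_t n = read(tun_fd, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(pi)) {
 *		memcpy(&pi, buf, sizeof(pi));
 *		if (pi.flags & TUN_PKT_STRIP)
 *			;	// truncated: use a larger buffer next time
 *		// payload starts at buf + sizeof(pi)
 *	}
 */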
2176 
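/* Blocking consume from the tx ring with the classic open-coded wait: the
 * ring is re-checked after set_current_state() so a producer's wakeup
 * between the check and schedule() cannot be lost.
 */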
2177 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2178 {
2179 	DECLARE_WAITQUEUE(wait, current);
2180 	void *ptr = NULL;
2181 	int error = 0;
2182 
2183 	ptr = ptr_ring_consume(&tfile->tx_ring);
2184 	if (ptr)
2185 		goto out;
2186 	if (noblock) {
2187 		error = -EAGAIN;
2188 		goto out;
2189 	}
2190 
2191 	add_wait_queue(&tfile->socket.wq.wait, &wait);
2192 
2193 	while (1) {
2194 		set_current_state(TASK_INTERRUPTIBLE);
2195 		ptr = ptr_ring_consume(&tfile->tx_ring);
2196 		if (ptr)
2197 			break;
2198 		if (signal_pending(current)) {
2199 			error = -ERESTARTSYS;
2200 			break;
2201 		}
2202 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2203 			error = -EFAULT;
2204 			break;
2205 		}
2206 
2207 		schedule();
2208 	}
2209 
2210 	__set_current_state(TASK_RUNNING);
2211 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2212 
2213 out:
2214 	*err = error;
2215 	return ptr;
2216 }
2217 
2218 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2219 			   struct iov_iter *to,
2220 			   int noblock, void *ptr)
2221 {
2222 	ssize_t ret;
2223 	int err;
2224 
2225 	if (!iov_iter_count(to)) {
2226 		tun_ptr_free(ptr);
2227 		return 0;
2228 	}
2229 
2230 	if (!ptr) {
2231 		/* Read frames from ring */
2232 		ptr = tun_ring_recv(tfile, noblock, &err);
2233 		if (!ptr)
2234 			return err;
2235 	}
2236 
2237 	if (tun_is_xdp_frame(ptr)) {
2238 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2239 
2240 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2241 		xdp_return_frame(xdpf);
2242 	} else {
2243 		struct sk_buff *skb = ptr;
2244 
2245 		ret = tun_put_user(tun, tfile, skb, to);
2246 		if (unlikely(ret < 0))
2247 			kfree_skb(skb);
2248 		else
2249 			consume_skb(skb);
2250 	}
2251 
2252 	return ret;
2253 }
2254 
2255 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2256 {
2257 	struct file *file = iocb->ki_filp;
2258 	struct tun_file *tfile = file->private_data;
2259 	struct tun_struct *tun = tun_get(tfile);
2260 	ssize_t len = iov_iter_count(to), ret;
2261 	int noblock = 0;
2262 
2263 	if (!tun)
2264 		return -EBADFD;
2265 
2266 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2267 		noblock = 1;
2268 
2269 	ret = tun_do_read(tun, tfile, to, noblock, NULL);
2270 	ret = min_t(ssize_t, ret, len);
2271 	if (ret > 0)
2272 		iocb->ki_pos = ret;
2273 	tun_put(tun);
2274 	return ret;
2275 }
2276 
2277 static void tun_prog_free(struct rcu_head *rcu)
2278 {
2279 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2280 
2281 	bpf_prog_destroy(prog->prog);
2282 	kfree(prog);
2283 }
2284 
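/* Swap in a new prog under tun->lock and retire the old one via call_rcu()
 * so in-flight readers finish first; a NULL prog simply detaches the
 * current program.
 */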
2285 static int __tun_set_ebpf(struct tun_struct *tun,
2286 			  struct tun_prog __rcu **prog_p,
2287 			  struct bpf_prog *prog)
2288 {
2289 	struct tun_prog *old, *new = NULL;
2290 
2291 	if (prog) {
2292 		new = kmalloc(sizeof(*new), GFP_KERNEL);
2293 		if (!new)
2294 			return -ENOMEM;
2295 		new->prog = prog;
2296 	}
2297 
2298 	spin_lock_bh(&tun->lock);
2299 	old = rcu_dereference_protected(*prog_p,
2300 					lockdep_is_held(&tun->lock));
2301 	rcu_assign_pointer(*prog_p, new);
2302 	spin_unlock_bh(&tun->lock);
2303 
2304 	if (old)
2305 		call_rcu(&old->rcu, tun_prog_free);
2306 
2307 	return 0;
2308 }
2309 
2310 static void tun_free_netdev(struct net_device *dev)
2311 {
2312 	struct tun_struct *tun = netdev_priv(dev);
2313 
2314 	BUG_ON(!(list_empty(&tun->disabled)));
2315 
2316 	tun_flow_uninit(tun);
2317 	security_tun_dev_free_security(tun->security);
2318 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2319 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2320 }
2321 
2322 static void tun_setup(struct net_device *dev)
2323 {
2324 	struct tun_struct *tun = netdev_priv(dev);
2325 
2326 	tun->owner = INVALID_UID;
2327 	tun->group = INVALID_GID;
2328 	tun_default_link_ksettings(dev, &tun->link_ksettings);
2329 
2330 	dev->ethtool_ops = &tun_ethtool_ops;
2331 	dev->needs_free_netdev = true;
2332 	dev->priv_destructor = tun_free_netdev;
2333 	/* We prefer our own queue length */
2334 	dev->tx_queue_len = TUN_READQ_SIZE;
2335 }
2336 
2337 /* Trivial set of netlink ops to allow deleting tun or tap
2338  * device with netlink.
2339  */
2340 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2341 			struct netlink_ext_ack *extack)
2342 {
2343 	NL_SET_ERR_MSG(extack,
2344 		       "tun/tap creation via rtnetlink is not supported.");
2345 	return -EOPNOTSUPP;
2346 }
2347 
2348 static size_t tun_get_size(const struct net_device *dev)
2349 {
2350 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2351 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2352 
2353 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2354 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2355 	       nla_total_size(sizeof(u8)) + /* TYPE */
2356 	       nla_total_size(sizeof(u8)) + /* PI */
2357 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2358 	       nla_total_size(sizeof(u8)) + /* PERSIST */
2359 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2360 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2361 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2362 	       0;
2363 }
2364 
2365 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2366 {
2367 	struct tun_struct *tun = netdev_priv(dev);
2368 
2369 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2370 		goto nla_put_failure;
2371 	if (uid_valid(tun->owner) &&
2372 	    nla_put_u32(skb, IFLA_TUN_OWNER,
2373 			from_kuid_munged(current_user_ns(), tun->owner)))
2374 		goto nla_put_failure;
2375 	if (gid_valid(tun->group) &&
2376 	    nla_put_u32(skb, IFLA_TUN_GROUP,
2377 			from_kgid_munged(current_user_ns(), tun->group)))
2378 		goto nla_put_failure;
2379 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2380 		goto nla_put_failure;
2381 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2382 		goto nla_put_failure;
2383 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2384 		goto nla_put_failure;
2385 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2386 		       !!(tun->flags & IFF_MULTI_QUEUE)))
2387 		goto nla_put_failure;
2388 	if (tun->flags & IFF_MULTI_QUEUE) {
2389 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2390 			goto nla_put_failure;
2391 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2392 				tun->numdisabled))
2393 			goto nla_put_failure;
2394 	}
2395 
2396 	return 0;
2397 
2398 nla_put_failure:
2399 	return -EMSGSIZE;
2400 }
2401 
2402 static struct rtnl_link_ops tun_link_ops __read_mostly = {
2403 	.kind		= DRV_NAME,
2404 	.priv_size	= sizeof(struct tun_struct),
2405 	.setup		= tun_setup,
2406 	.validate	= tun_validate,
2407 	.get_size       = tun_get_size,
2408 	.fill_info      = tun_fill_info,
2409 };
2410 
2411 static void tun_sock_write_space(struct sock *sk)
2412 {
2413 	struct tun_file *tfile;
2414 	wait_queue_head_t *wqueue;
2415 
2416 	if (!sock_writeable(sk))
2417 		return;
2418 
2419 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2420 		return;
2421 
2422 	wqueue = sk_sleep(sk);
2423 	if (wqueue && waitqueue_active(wqueue))
2424 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2425 						EPOLLWRNORM | EPOLLWRBAND);
2426 
2427 	tfile = container_of(sk, struct tun_file, sk);
2428 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2429 }
2430 
2431 static void tun_put_page(struct tun_page *tpage)
2432 {
2433 	if (tpage->page)
2434 		__page_frag_cache_drain(tpage->page, tpage->count);
2435 }
2436 
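/* Handle one xdp_buff from a TUN_MSG_PTR batch.  Returns a negative errno
 * on error, 1 if the resulting skb was queued for the NAPI poller (the
 * caller is expected to schedule NAPI), or 0 if the buffer was consumed
 * here.
 */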
2437 static int tun_xdp_one(struct tun_struct *tun,
2438 		       struct tun_file *tfile,
2439 		       struct xdp_buff *xdp, int *flush,
2440 		       struct tun_page *tpage)
2441 {
2442 	unsigned int datasize = xdp->data_end - xdp->data;
2443 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2444 	struct virtio_net_hdr *gso = &hdr->gso;
2445 	struct bpf_prog *xdp_prog;
2446 	struct sk_buff *skb = NULL;
2447 	struct sk_buff_head *queue;
2448 	u32 rxhash = 0, act;
2449 	int buflen = hdr->buflen;
2450 	int ret = 0;
2451 	bool skb_xdp = false;
2452 	struct page *page;
2453 
2454 	xdp_prog = rcu_dereference(tun->xdp_prog);
2455 	if (xdp_prog) {
2456 		if (gso->gso_type) {
2457 			skb_xdp = true;
2458 			goto build;
2459 		}
2460 
2461 		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2462 		xdp_set_data_meta_invalid(xdp);
2463 
2464 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2465 		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2466 		if (ret < 0) {
2467 			put_page(virt_to_head_page(xdp->data));
2468 			return ret;
2469 		}
2470 
2471 		switch (ret) {
2472 		case XDP_REDIRECT:
2473 			*flush = true;
2474 			fallthrough;
2475 		case XDP_TX:
2476 			return 0;
2477 		case XDP_PASS:
2478 			break;
2479 		default:
2480 			page = virt_to_head_page(xdp->data);
2481 			if (tpage->page == page) {
2482 				++tpage->count;
2483 			} else {
2484 				tun_put_page(tpage);
2485 				tpage->page = page;
2486 				tpage->count = 1;
2487 			}
2488 			return 0;
2489 		}
2490 	}
2491 
2492 build:
2493 	skb = build_skb(xdp->data_hard_start, buflen);
2494 	if (!skb) {
2495 		ret = -ENOMEM;
2496 		goto out;
2497 	}
2498 
2499 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2500 	skb_put(skb, xdp->data_end - xdp->data);
2501 
2502 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2503 		atomic_long_inc(&tun->rx_frame_errors);
2504 		kfree_skb(skb);
2505 		ret = -EINVAL;
2506 		goto out;
2507 	}
2508 
2509 	skb->protocol = eth_type_trans(skb, tun->dev);
2510 	skb_reset_network_header(skb);
2511 	skb_probe_transport_header(skb);
2512 	skb_record_rx_queue(skb, tfile->queue_index);
2513 
2514 	if (skb_xdp) {
2515 		ret = do_xdp_generic(xdp_prog, &skb);
2516 		if (ret != XDP_PASS) {
2517 			ret = 0;
2518 			goto out;
2519 		}
2520 	}
2521 
2522 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2523 	    !tfile->detached)
2524 		rxhash = __skb_get_hash_symmetric(skb);
2525 
2526 	if (tfile->napi_enabled) {
2527 		queue = &tfile->sk.sk_write_queue;
2528 		spin_lock(&queue->lock);
2529 
2530 		if (unlikely(tfile->detached)) {
2531 			spin_unlock(&queue->lock);
2532 			kfree_skb(skb);
2533 			return -EBUSY;
2534 		}
2535 
2536 		__skb_queue_tail(queue, skb);
2537 		spin_unlock(&queue->lock);
2538 		ret = 1;
2539 	} else {
2540 		netif_receive_skb(skb);
2541 		ret = 0;
2542 	}
2543 
2544 	/* No need to disable preemption here since this function is
2545 	 * always called with bh disabled
2546 	 */
2547 	dev_sw_netstats_rx_add(tun->dev, datasize);
2548 
2549 	if (rxhash)
2550 		tun_flow_update(tun, rxhash, tfile);
2551 
2552 out:
2553 	return ret;
2554 }
2555 
2556 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2557 {
2558 	int ret, i;
2559 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2560 	struct tun_struct *tun = tun_get(tfile);
2561 	struct tun_msg_ctl *ctl = m->msg_control;
2562 	struct xdp_buff *xdp;
2563 
2564 	if (!tun)
2565 		return -EBADFD;
2566 
2567 	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2568 	    ctl && ctl->type == TUN_MSG_PTR) {
2569 		struct tun_page tpage;
2570 		int n = ctl->num;
2571 		int flush = 0, queued = 0;
2572 
2573 		memset(&tpage, 0, sizeof(tpage));
2574 
2575 		local_bh_disable();
2576 		rcu_read_lock();
2577 
2578 		for (i = 0; i < n; i++) {
2579 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2580 			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2581 			if (ret > 0)
2582 				queued += ret;
2583 		}
2584 
2585 		if (flush)
2586 			xdp_do_flush();
2587 
2588 		if (tfile->napi_enabled && queued > 0)
2589 			napi_schedule(&tfile->napi);
2590 
2591 		rcu_read_unlock();
2592 		local_bh_enable();
2593 
2594 		tun_put_page(&tpage);
2595 
2596 		ret = total_len;
2597 		goto out;
2598 	}
2599 
2600 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2601 			   m->msg_flags & MSG_DONTWAIT,
2602 			   m->msg_flags & MSG_MORE);
2603 out:
2604 	tun_put(tun);
2605 	return ret;
2606 }
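
/* Sketch of the batched TUN_MSG_PTR path as seen from an in-kernel caller
 * (this is how vhost-net is expected to drive it; tun_msg_ctl is not uapi).
 * "xdps" and "batch" are assumed to describe prepared xdp_buffs:
 *
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = batch,
 *		.ptr  = xdps,
 *	};
 *	struct msghdr msg = {
 *		.msg_control	= &ctl,
 *		.msg_controllen	= sizeof(ctl),
 *	};
 *
 *	sock_sendmsg(tun_sock, &msg);	// each entry goes through tun_xdp_one()
 */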
2607 
2608 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2609 		       int flags)
2610 {
2611 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2612 	struct tun_struct *tun = tun_get(tfile);
2613 	void *ptr = m->msg_control;
2614 	int ret;
2615 
2616 	if (!tun) {
2617 		ret = -EBADFD;
2618 		goto out_free;
2619 	}
2620 
2621 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2622 		ret = -EINVAL;
2623 		goto out_put_tun;
2624 	}
2625 	if (flags & MSG_ERRQUEUE) {
2626 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2627 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2628 		goto out;
2629 	}
2630 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2631 	if (ret > (ssize_t)total_len) {
2632 		m->msg_flags |= MSG_TRUNC;
2633 		ret = flags & MSG_TRUNC ? ret : total_len;
2634 	}
2635 out:
2636 	tun_put(tun);
2637 	return ret;
2638 
2639 out_put_tun:
2640 	tun_put(tun);
2641 out_free:
2642 	tun_ptr_free(ptr);
2643 	return ret;
2644 }
2645 
2646 static int tun_ptr_peek_len(void *ptr)
2647 {
2648 	if (likely(ptr)) {
2649 		if (tun_is_xdp_frame(ptr)) {
2650 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2651 
2652 			return xdpf->len;
2653 		}
2654 		return __skb_array_len_with_tag(ptr);
2655 	} else {
2656 		return 0;
2657 	}
2658 }
2659 
2660 static int tun_peek_len(struct socket *sock)
2661 {
2662 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2663 	struct tun_struct *tun;
2664 	int ret = 0;
2665 
2666 	tun = tun_get(tfile);
2667 	if (!tun)
2668 		return 0;
2669 
2670 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2671 	tun_put(tun);
2672 
2673 	return ret;
2674 }
2675 
2676 /* Ops structure to mimic raw sockets with tun */
2677 static const struct proto_ops tun_socket_ops = {
2678 	.peek_len = tun_peek_len,
2679 	.sendmsg = tun_sendmsg,
2680 	.recvmsg = tun_recvmsg,
2681 };
2682 
2683 static struct proto tun_proto = {
2684 	.name		= "tun",
2685 	.owner		= THIS_MODULE,
2686 	.obj_size	= sizeof(struct tun_file),
2687 };
2688 
2689 static int tun_flags(struct tun_struct *tun)
2690 {
2691 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2692 }
2693 
2694 static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2695 			      char *buf)
2696 {
2697 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2698 	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2699 }
2700 
2701 static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2702 			  char *buf)
2703 {
2704 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2705 	return uid_valid(tun->owner) ?
2706 		sysfs_emit(buf, "%u\n",
2707 			   from_kuid_munged(current_user_ns(), tun->owner)) :
2708 		sysfs_emit(buf, "-1\n");
2709 }
2710 
2711 static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2712 			  char *buf)
2713 {
2714 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2715 	return gid_valid(tun->group) ?
2716 		sysfs_emit(buf, "%u\n",
2717 			   from_kgid_munged(current_user_ns(), tun->group)) :
2718 		sysfs_emit(buf, "-1\n");
2719 }
2720 
2721 static DEVICE_ATTR_RO(tun_flags);
2722 static DEVICE_ATTR_RO(owner);
2723 static DEVICE_ATTR_RO(group);
2724 
2725 static struct attribute *tun_dev_attrs[] = {
2726 	&dev_attr_tun_flags.attr,
2727 	&dev_attr_owner.attr,
2728 	&dev_attr_group.attr,
2729 	NULL
2730 };
2731 
2732 static const struct attribute_group tun_attr_group = {
2733 	.attrs = tun_dev_attrs
2734 };
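
/* These attributes appear under /sys/class/net/<dev>/ once the netdev is
 * registered (tun_set_iff() below hooks them up via sysfs_groups), e.g.
 * "owner" reads back -1 until an owner is set.
 */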
2735 
2736 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2737 {
2738 	struct tun_struct *tun;
2739 	struct tun_file *tfile = file->private_data;
2740 	struct net_device *dev;
2741 	int err;
2742 
2743 	if (tfile->detached)
2744 		return -EINVAL;
2745 
2746 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2747 		if (!capable(CAP_NET_ADMIN))
2748 			return -EPERM;
2749 
2750 		if (!(ifr->ifr_flags & IFF_NAPI) ||
2751 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2752 			return -EINVAL;
2753 	}
2754 
2755 	dev = __dev_get_by_name(net, ifr->ifr_name);
2756 	if (dev) {
2757 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2758 			return -EBUSY;
2759 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2760 			tun = netdev_priv(dev);
2761 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2762 			tun = netdev_priv(dev);
2763 		else
2764 			return -EINVAL;
2765 
2766 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2767 		    !!(tun->flags & IFF_MULTI_QUEUE))
2768 			return -EINVAL;
2769 
2770 		if (tun_not_capable(tun))
2771 			return -EPERM;
2772 		err = security_tun_dev_open(tun->security);
2773 		if (err < 0)
2774 			return err;
2775 
2776 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2777 				 ifr->ifr_flags & IFF_NAPI,
2778 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2779 		if (err < 0)
2780 			return err;
2781 
2782 		if (tun->flags & IFF_MULTI_QUEUE &&
2783 		    (tun->numqueues + tun->numdisabled > 1)) {
2784 			/* One or more queues have already been attached; no need
2785 			 * to initialize the device again.
2786 			 */
2787 			netdev_state_change(dev);
2788 			return 0;
2789 		}
2790 
2791 		tun->flags = (tun->flags & ~TUN_FEATURES) |
2792 			      (ifr->ifr_flags & TUN_FEATURES);
2793 
2794 		netdev_state_change(dev);
2795 	} else {
2796 		char *name;
2797 		unsigned long flags = 0;
2798 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2799 			     MAX_TAP_QUEUES : 1;
2800 
2801 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2802 			return -EPERM;
2803 		err = security_tun_dev_create();
2804 		if (err < 0)
2805 			return err;
2806 
2807 		/* Set dev type */
2808 		if (ifr->ifr_flags & IFF_TUN) {
2809 			/* TUN device */
2810 			flags |= IFF_TUN;
2811 			name = "tun%d";
2812 		} else if (ifr->ifr_flags & IFF_TAP) {
2813 			/* TAP device */
2814 			flags |= IFF_TAP;
2815 			name = "tap%d";
2816 		} else
2817 			return -EINVAL;
2818 
2819 		if (*ifr->ifr_name)
2820 			name = ifr->ifr_name;
2821 
2822 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2823 				       NET_NAME_UNKNOWN, tun_setup, queues,
2824 				       queues);
2825 
2826 		if (!dev)
2827 			return -ENOMEM;
2828 
2829 		dev_net_set(dev, net);
2830 		dev->rtnl_link_ops = &tun_link_ops;
2831 		dev->ifindex = tfile->ifindex;
2832 		dev->sysfs_groups[0] = &tun_attr_group;
2833 
2834 		tun = netdev_priv(dev);
2835 		tun->dev = dev;
2836 		tun->flags = flags;
2837 		tun->txflt.count = 0;
2838 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2839 
2840 		tun->align = NET_SKB_PAD;
2841 		tun->filter_attached = false;
2842 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2843 		tun->rx_batched = 0;
2844 		RCU_INIT_POINTER(tun->steering_prog, NULL);
2845 
2846 		tun->ifr = ifr;
2847 		tun->file = file;
2848 
2849 		tun_net_initialize(dev);
2850 
2851 		err = register_netdevice(tun->dev);
2852 		if (err < 0) {
2853 			free_netdev(dev);
2854 			return err;
2855 		}
2856 		/* free_netdev() won't check the refcnt; to avoid racing with
2857 		 * dev_put() we must publish tun only after registration.
2858 		 */
2859 		rcu_assign_pointer(tfile->tun, tun);
2860 	}
2861 
2862 	if (ifr->ifr_flags & IFF_NO_CARRIER)
2863 		netif_carrier_off(tun->dev);
2864 	else
2865 		netif_carrier_on(tun->dev);
2866 
2867 	/* Make sure persistent devices do not get stuck in
2868 	 * xoff state.
2869 	 */
2870 	if (netif_running(tun->dev))
2871 		netif_tx_wake_all_queues(tun->dev);
2872 
2873 	strcpy(ifr->ifr_name, tun->dev->name);
2874 	return 0;
2875 }
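
/* Illustrative userspace sketch, not driver code: creating a tap device.
 * Leaving ifr_name empty lets the kernel pick "tap%d"; the chosen name is
 * copied back into ifr on success (see the final strcpy() above).
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *		;	// e.g. EPERM without CAP_NET_ADMIN
 *	// ifr.ifr_name now holds the allocated name, e.g. "tap0"
 */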
2876 
2877 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2878 {
2879 	strcpy(ifr->ifr_name, tun->dev->name);
2880 
2881 	ifr->ifr_flags = tun_flags(tun);
2882 }
2884 
2885 /* This is like a cut-down ethtool ops, except done via tun fd so no
2886  * privs required. */
2887 static int set_offload(struct tun_struct *tun, unsigned long arg)
2888 {
2889 	netdev_features_t features = 0;
2890 
2891 	if (arg & TUN_F_CSUM) {
2892 		features |= NETIF_F_HW_CSUM;
2893 		arg &= ~TUN_F_CSUM;
2894 
2895 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2896 			if (arg & TUN_F_TSO_ECN) {
2897 				features |= NETIF_F_TSO_ECN;
2898 				arg &= ~TUN_F_TSO_ECN;
2899 			}
2900 			if (arg & TUN_F_TSO4)
2901 				features |= NETIF_F_TSO;
2902 			if (arg & TUN_F_TSO6)
2903 				features |= NETIF_F_TSO6;
2904 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2905 		}
2906 
2907 		arg &= ~TUN_F_UFO;
2908 
2909 		/* TODO: for now USO4 and USO6 must be requested together */
2910 		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
2911 			features |= NETIF_F_GSO_UDP_L4;
2912 			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
2913 		}
2914 	}
2915 
2916 	/* This gives the user a way to test for new features in the future
2917 	 * by trying to set them. */
2918 	if (arg)
2919 		return -EINVAL;
2920 
2921 	tun->set_features = features;
2922 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2923 	tun->dev->wanted_features |= features;
2924 	netdev_update_features(tun->dev);
2925 
2926 	return 0;
2927 }
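
/* Illustrative userspace sketch, not driver code: TUNSETOFFLOAD takes the
 * TUN_F_* bits directly as the ioctl argument; leftover (unknown) bits are
 * rejected, which lets userspace probe for new offloads:
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6) < 0)
 *		;	// EINVAL: some requested offload is unsupported
 */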
2928 
2929 static void tun_detach_filter(struct tun_struct *tun, int n)
2930 {
2931 	int i;
2932 	struct tun_file *tfile;
2933 
2934 	for (i = 0; i < n; i++) {
2935 		tfile = rtnl_dereference(tun->tfiles[i]);
2936 		lock_sock(tfile->socket.sk);
2937 		sk_detach_filter(tfile->socket.sk);
2938 		release_sock(tfile->socket.sk);
2939 	}
2940 
2941 	tun->filter_attached = false;
2942 }
2943 
2944 static int tun_attach_filter(struct tun_struct *tun)
2945 {
2946 	int i, ret = 0;
2947 	struct tun_file *tfile;
2948 
2949 	for (i = 0; i < tun->numqueues; i++) {
2950 		tfile = rtnl_dereference(tun->tfiles[i]);
2951 		lock_sock(tfile->socket.sk);
2952 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2953 		release_sock(tfile->socket.sk);
2954 		if (ret) {
2955 			tun_detach_filter(tun, i);
2956 			return ret;
2957 		}
2958 	}
2959 
2960 	tun->filter_attached = true;
2961 	return ret;
2962 }
2963 
2964 static void tun_set_sndbuf(struct tun_struct *tun)
2965 {
2966 	struct tun_file *tfile;
2967 	int i;
2968 
2969 	for (i = 0; i < tun->numqueues; i++) {
2970 		tfile = rtnl_dereference(tun->tfiles[i]);
2971 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2972 	}
2973 }
2974 
2975 static int tun_set_queue(struct file *file, struct ifreq *ifr)
2976 {
2977 	struct tun_file *tfile = file->private_data;
2978 	struct tun_struct *tun;
2979 	int ret = 0;
2980 
2981 	rtnl_lock();
2982 
2983 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2984 		tun = tfile->detached;
2985 		if (!tun) {
2986 			ret = -EINVAL;
2987 			goto unlock;
2988 		}
2989 		ret = security_tun_dev_attach_queue(tun->security);
2990 		if (ret < 0)
2991 			goto unlock;
2992 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2993 				 tun->flags & IFF_NAPI_FRAGS, true);
2994 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2995 		tun = rtnl_dereference(tfile->tun);
2996 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2997 			ret = -EINVAL;
2998 		else
2999 			__tun_detach(tfile, false);
3000 	} else
3001 		ret = -EINVAL;
3002 
3003 	if (ret >= 0)
3004 		netdev_state_change(tun->dev);
3005 
3006 unlock:
3007 	rtnl_unlock();
3008 	return ret;
3009 }
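
/* Illustrative userspace sketch, not driver code: parking and re-attaching
 * a queue of a multi-queue device.  The fd identifies the queue; only the
 * ATTACH/DETACH flag in ifr is consulted.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// queue stops carrying traffic
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// and is enabled again
 */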
3010 
3011 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3012 			void __user *data)
3013 {
3014 	struct bpf_prog *prog;
3015 	int fd;
3016 
3017 	if (copy_from_user(&fd, data, sizeof(fd)))
3018 		return -EFAULT;
3019 
3020 	if (fd == -1) {
3021 		prog = NULL;
3022 	} else {
3023 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3024 		if (IS_ERR(prog))
3025 			return PTR_ERR(prog);
3026 	}
3027 
3028 	return __tun_set_ebpf(tun, prog_p, prog);
3029 }
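
/* Illustrative userspace sketch, not driver code: "prog_fd" is assumed to
 * be a loaded BPF_PROG_TYPE_SOCKET_FILTER program; passing -1 detaches.
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 *
 *	int none = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &none);	// default steering again
 */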
3030 
3031 /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3032 static unsigned char tun_get_addr_len(unsigned short type)
3033 {
3034 	switch (type) {
3035 	case ARPHRD_IP6GRE:
3036 	case ARPHRD_TUNNEL6:
3037 		return sizeof(struct in6_addr);
3038 	case ARPHRD_IPGRE:
3039 	case ARPHRD_TUNNEL:
3040 	case ARPHRD_SIT:
3041 		return 4;
3042 	case ARPHRD_ETHER:
3043 		return ETH_ALEN;
3044 	case ARPHRD_IEEE802154:
3045 	case ARPHRD_IEEE802154_MONITOR:
3046 		return IEEE802154_EXTENDED_ADDR_LEN;
3047 	case ARPHRD_PHONET_PIPE:
3048 	case ARPHRD_PPP:
3049 	case ARPHRD_NONE:
3050 		return 0;
3051 	case ARPHRD_6LOWPAN:
3052 		return EUI64_ADDR_LEN;
3053 	case ARPHRD_FDDI:
3054 		return FDDI_K_ALEN;
3055 	case ARPHRD_HIPPI:
3056 		return HIPPI_ALEN;
3057 	case ARPHRD_IEEE802:
3058 		return FC_ALEN;
3059 	case ARPHRD_ROSE:
3060 		return ROSE_ADDR_LEN;
3061 	case ARPHRD_NETROM:
3062 		return AX25_ADDR_LEN;
3063 	case ARPHRD_LOCALTLK:
3064 		return LTALK_ALEN;
3065 	default:
3066 		return 0;
3067 	}
3068 }
3069 
3070 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3071 			    unsigned long arg, int ifreq_len)
3072 {
3073 	struct tun_file *tfile = file->private_data;
3074 	struct net *net = sock_net(&tfile->sk);
3075 	struct tun_struct *tun;
3076 	void __user *argp = (void __user *)arg;
3077 	unsigned int carrier;
3078 	struct ifreq ifr;
3079 	kuid_t owner;
3080 	kgid_t group;
3081 	int ifindex;
3082 	int sndbuf;
3083 	int vnet_hdr_sz;
3084 	int le;
3085 	int ret;
3086 	bool do_notify = false;
3087 
3088 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3089 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3090 		if (copy_from_user(&ifr, argp, ifreq_len))
3091 			return -EFAULT;
3092 	} else {
3093 		memset(&ifr, 0, sizeof(ifr));
3094 	}
3095 	if (cmd == TUNGETFEATURES) {
3096 		/* Currently this just means: "what IFF flags are valid?".
3097 		 * This is needed because we never checked for invalid flags on
3098 		 * TUNSETIFF.
3099 		 */
3100 		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3101 				TUN_FEATURES, (unsigned int __user *)argp);
3102 	} else if (cmd == TUNSETQUEUE) {
3103 		return tun_set_queue(file, &ifr);
3104 	} else if (cmd == SIOCGSKNS) {
3105 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3106 			return -EPERM;
3107 		return open_related_ns(&net->ns, get_net_ns);
3108 	}
3109 
3110 	rtnl_lock();
3111 
3112 	tun = tun_get(tfile);
3113 	if (cmd == TUNSETIFF) {
3114 		ret = -EEXIST;
3115 		if (tun)
3116 			goto unlock;
3117 
3118 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3119 
3120 		ret = tun_set_iff(net, file, &ifr);
3121 
3122 		if (ret)
3123 			goto unlock;
3124 
3125 		if (copy_to_user(argp, &ifr, ifreq_len))
3126 			ret = -EFAULT;
3127 		goto unlock;
3128 	}
3129 	if (cmd == TUNSETIFINDEX) {
3130 		ret = -EPERM;
3131 		if (tun)
3132 			goto unlock;
3133 
3134 		ret = -EFAULT;
3135 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3136 			goto unlock;
3137 		ret = -EINVAL;
3138 		if (ifindex < 0)
3139 			goto unlock;
3140 		ret = 0;
3141 		tfile->ifindex = ifindex;
3142 		goto unlock;
3143 	}
3144 
3145 	ret = -EBADFD;
3146 	if (!tun)
3147 		goto unlock;
3148 
3149 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3150 
3151 	net = dev_net(tun->dev);
3152 	ret = 0;
3153 	switch (cmd) {
3154 	case TUNGETIFF:
3155 		tun_get_iff(tun, &ifr);
3156 
3157 		if (tfile->detached)
3158 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3159 		if (!tfile->socket.sk->sk_filter)
3160 			ifr.ifr_flags |= IFF_NOFILTER;
3161 
3162 		if (copy_to_user(argp, &ifr, ifreq_len))
3163 			ret = -EFAULT;
3164 		break;
3165 
3166 	case TUNSETNOCSUM:
3167 		/* Disable/Enable checksum */
3168 
3169 		/* [unimplemented] */
3170 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3171 			   arg ? "disabled" : "enabled");
3172 		break;
3173 
3174 	case TUNSETPERSIST:
3175 		/* Disable/Enable persist mode. Keep an extra reference to the
3176 		 * module to prevent it from being unloaded while persistent.
3177 		 */
3178 		if (arg && !(tun->flags & IFF_PERSIST)) {
3179 			tun->flags |= IFF_PERSIST;
3180 			__module_get(THIS_MODULE);
3181 			do_notify = true;
3182 		}
3183 		if (!arg && (tun->flags & IFF_PERSIST)) {
3184 			tun->flags &= ~IFF_PERSIST;
3185 			module_put(THIS_MODULE);
3186 			do_notify = true;
3187 		}
3188 
3189 		netif_info(tun, drv, tun->dev, "persist %s\n",
3190 			   arg ? "enabled" : "disabled");
3191 		break;
3192 
3193 	case TUNSETOWNER:
3194 		/* Set owner of the device */
3195 		owner = make_kuid(current_user_ns(), arg);
3196 		if (!uid_valid(owner)) {
3197 			ret = -EINVAL;
3198 			break;
3199 		}
3200 		tun->owner = owner;
3201 		do_notify = true;
3202 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
3203 			   from_kuid(&init_user_ns, tun->owner));
3204 		break;
3205 
3206 	case TUNSETGROUP:
3207 		/* Set group of the device */
3208 		group = make_kgid(current_user_ns(), arg);
3209 		if (!gid_valid(group)) {
3210 			ret = -EINVAL;
3211 			break;
3212 		}
3213 		tun->group = group;
3214 		do_notify = true;
3215 		netif_info(tun, drv, tun->dev, "group set to %u\n",
3216 			   from_kgid(&init_user_ns, tun->group));
3217 		break;
3218 
3219 	case TUNSETLINK:
3220 		/* Only allow setting the type when the interface is down */
3221 		if (tun->dev->flags & IFF_UP) {
3222 			netif_info(tun, drv, tun->dev,
3223 				   "Linktype set failed because interface is up\n");
3224 			ret = -EBUSY;
3225 		} else {
3226 			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3227 						       tun->dev);
3228 			ret = notifier_to_errno(ret);
3229 			if (ret) {
3230 				netif_info(tun, drv, tun->dev,
3231 					   "Refused to change device type\n");
3232 				break;
3233 			}
3234 			tun->dev->type = (int) arg;
3235 			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3236 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3237 				   tun->dev->type);
3238 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3239 						 tun->dev);
3240 		}
3241 		break;
3242 
3243 	case TUNSETDEBUG:
3244 		tun->msg_enable = (u32)arg;
3245 		break;
3246 
3247 	case TUNSETOFFLOAD:
3248 		ret = set_offload(tun, arg);
3249 		break;
3250 
3251 	case TUNSETTXFILTER:
3252 		/* Can be set only for TAPs */
3253 		ret = -EINVAL;
3254 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3255 			break;
3256 		ret = update_filter(&tun->txflt, (void __user *)arg);
3257 		break;
3258 
3259 	case SIOCGIFHWADDR:
3260 		/* Get hw address */
3261 		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3262 		if (copy_to_user(argp, &ifr, ifreq_len))
3263 			ret = -EFAULT;
3264 		break;
3265 
3266 	case SIOCSIFHWADDR:
3267 		/* Set hw address */
3268 		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3269 		break;
3270 
3271 	case TUNGETSNDBUF:
3272 		sndbuf = tfile->socket.sk->sk_sndbuf;
3273 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3274 			ret = -EFAULT;
3275 		break;
3276 
3277 	case TUNSETSNDBUF:
3278 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3279 			ret = -EFAULT;
3280 			break;
3281 		}
3282 		if (sndbuf <= 0) {
3283 			ret = -EINVAL;
3284 			break;
3285 		}
3286 
3287 		tun->sndbuf = sndbuf;
3288 		tun_set_sndbuf(tun);
3289 		break;
3290 
3291 	case TUNGETVNETHDRSZ:
3292 		vnet_hdr_sz = tun->vnet_hdr_sz;
3293 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3294 			ret = -EFAULT;
3295 		break;
3296 
3297 	case TUNSETVNETHDRSZ:
3298 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3299 			ret = -EFAULT;
3300 			break;
3301 		}
3302 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3303 			ret = -EINVAL;
3304 			break;
3305 		}
3306 
3307 		tun->vnet_hdr_sz = vnet_hdr_sz;
3308 		break;
3309 
3310 	case TUNGETVNETLE:
3311 		le = !!(tun->flags & TUN_VNET_LE);
3312 		if (put_user(le, (int __user *)argp))
3313 			ret = -EFAULT;
3314 		break;
3315 
3316 	case TUNSETVNETLE:
3317 		if (get_user(le, (int __user *)argp)) {
3318 			ret = -EFAULT;
3319 			break;
3320 		}
3321 		if (le)
3322 			tun->flags |= TUN_VNET_LE;
3323 		else
3324 			tun->flags &= ~TUN_VNET_LE;
3325 		break;
3326 
3327 	case TUNGETVNETBE:
3328 		ret = tun_get_vnet_be(tun, argp);
3329 		break;
3330 
3331 	case TUNSETVNETBE:
3332 		ret = tun_set_vnet_be(tun, argp);
3333 		break;
3334 
3335 	case TUNATTACHFILTER:
3336 		/* Can be set only for TAPs */
3337 		ret = -EINVAL;
3338 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3339 			break;
3340 		ret = -EFAULT;
3341 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3342 			break;
3343 
3344 		ret = tun_attach_filter(tun);
3345 		break;
3346 
3347 	case TUNDETACHFILTER:
3348 		/* Can be set only for TAPs */
3349 		ret = -EINVAL;
3350 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3351 			break;
3352 		ret = 0;
3353 		tun_detach_filter(tun, tun->numqueues);
3354 		break;
3355 
3356 	case TUNGETFILTER:
3357 		ret = -EINVAL;
3358 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3359 			break;
3360 		ret = -EFAULT;
3361 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3362 			break;
3363 		ret = 0;
3364 		break;
3365 
3366 	case TUNSETSTEERINGEBPF:
3367 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3368 		break;
3369 
3370 	case TUNSETFILTEREBPF:
3371 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3372 		break;
3373 
3374 	case TUNSETCARRIER:
3375 		ret = -EFAULT;
3376 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3377 			goto unlock;
3378 
3379 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3380 		break;
3381 
3382 	case TUNGETDEVNETNS:
3383 		ret = -EPERM;
3384 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3385 			goto unlock;
3386 		ret = open_related_ns(&net->ns, get_net_ns);
3387 		break;
3388 
3389 	default:
3390 		ret = -EINVAL;
3391 		break;
3392 	}
3393 
3394 	if (do_notify)
3395 		netdev_state_change(tun->dev);
3396 
3397 unlock:
3398 	rtnl_unlock();
3399 	if (tun)
3400 		tun_put(tun);
3401 	return ret;
3402 }
3403 
3404 static long tun_chr_ioctl(struct file *file,
3405 			  unsigned int cmd, unsigned long arg)
3406 {
3407 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
3408 }
3409 
3410 #ifdef CONFIG_COMPAT
3411 static long tun_chr_compat_ioctl(struct file *file,
3412 			 unsigned int cmd, unsigned long arg)
3413 {
3414 	switch (cmd) {
3415 	case TUNSETIFF:
3416 	case TUNGETIFF:
3417 	case TUNSETTXFILTER:
3418 	case TUNGETSNDBUF:
3419 	case TUNSETSNDBUF:
3420 	case SIOCGIFHWADDR:
3421 	case SIOCSIFHWADDR:
3422 		arg = (unsigned long)compat_ptr(arg);
3423 		break;
3424 	default:
3425 		arg = (compat_ulong_t)arg;
3426 		break;
3427 	}
3428 
3429 	/*
3430 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3431 	 * the end of that structure. All fields that are used in this
3432 	 * driver are compatible though, we don't need to convert the
3433 	 * contents.
3434 	 */
3435 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3436 }
3437 #endif /* CONFIG_COMPAT */
3438 
3439 static int tun_chr_fasync(int fd, struct file *file, int on)
3440 {
3441 	struct tun_file *tfile = file->private_data;
3442 	int ret;
3443 
3444 	ret = fasync_helper(fd, file, on, &tfile->fasync);
3445 	if (ret < 0)
		goto out;
3446 
3447 	if (on) {
3448 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3449 		tfile->flags |= TUN_FASYNC;
3450 	} else
3451 		tfile->flags &= ~TUN_FASYNC;
3452 	ret = 0;
3453 out:
3454 	return ret;
3455 }
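
/* Illustrative userspace sketch, not driver code: requesting SIGIO-driven
 * I/O on a tun fd; the fasync hook above plus the read/write wakeups
 * deliver the signal.
 *
 *	fcntl(tun_fd, F_SETOWN, getpid());
 *	fcntl(tun_fd, F_SETFL, fcntl(tun_fd, F_GETFL) | O_ASYNC);
 */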
3456 
3457 static int tun_chr_open(struct inode *inode, struct file *file)
3458 {
3459 	struct net *net = current->nsproxy->net_ns;
3460 	struct tun_file *tfile;
3461 
3462 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3463 					    &tun_proto, 0);
3464 	if (!tfile)
3465 		return -ENOMEM;
3466 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3467 		sk_free(&tfile->sk);
3468 		return -ENOMEM;
3469 	}
3470 
3471 	mutex_init(&tfile->napi_mutex);
3472 	RCU_INIT_POINTER(tfile->tun, NULL);
3473 	tfile->flags = 0;
3474 	tfile->ifindex = 0;
3475 
3476 	init_waitqueue_head(&tfile->socket.wq.wait);
3477 
3478 	tfile->socket.file = file;
3479 	tfile->socket.ops = &tun_socket_ops;
3480 
3481 	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
3482 
3483 	tfile->sk.sk_write_space = tun_sock_write_space;
3484 	tfile->sk.sk_sndbuf = INT_MAX;
3485 
3486 	file->private_data = tfile;
3487 	INIT_LIST_HEAD(&tfile->next);
3488 
3489 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3490 
3491 	/* tun groks IOCB_NOWAIT just fine, mark it as such */
3492 	file->f_mode |= FMODE_NOWAIT;
3493 	return 0;
3494 }
3495 
3496 static int tun_chr_close(struct inode *inode, struct file *file)
3497 {
3498 	struct tun_file *tfile = file->private_data;
3499 
3500 	tun_detach(tfile, true);
3501 
3502 	return 0;
3503 }
3504 
3505 #ifdef CONFIG_PROC_FS
3506 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3507 {
3508 	struct tun_file *tfile = file->private_data;
3509 	struct tun_struct *tun;
3510 	struct ifreq ifr;
3511 
3512 	memset(&ifr, 0, sizeof(ifr));
3513 
3514 	rtnl_lock();
3515 	tun = tun_get(tfile);
3516 	if (tun)
3517 		tun_get_iff(tun, &ifr);
3518 	rtnl_unlock();
3519 
3520 	if (tun)
3521 		tun_put(tun);
3522 
3523 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3524 }
3525 #endif
3526 
3527 static const struct file_operations tun_fops = {
3528 	.owner	= THIS_MODULE,
3529 	.llseek = no_llseek,
3530 	.read_iter  = tun_chr_read_iter,
3531 	.write_iter = tun_chr_write_iter,
3532 	.poll	= tun_chr_poll,
3533 	.unlocked_ioctl	= tun_chr_ioctl,
3534 #ifdef CONFIG_COMPAT
3535 	.compat_ioctl = tun_chr_compat_ioctl,
3536 #endif
3537 	.open	= tun_chr_open,
3538 	.release = tun_chr_close,
3539 	.fasync = tun_chr_fasync,
3540 #ifdef CONFIG_PROC_FS
3541 	.show_fdinfo = tun_chr_show_fdinfo,
3542 #endif
3543 };
3544 
3545 static struct miscdevice tun_miscdev = {
3546 	.minor = TUN_MINOR,
3547 	.name = "tun",
3548 	.nodename = "net/tun",
3549 	.fops = &tun_fops,
3550 };
3551 
3552 /* ethtool interface */
3553 
3554 static void tun_default_link_ksettings(struct net_device *dev,
3555 				       struct ethtool_link_ksettings *cmd)
3556 {
3557 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3558 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3559 	cmd->base.speed		= SPEED_10000;
3560 	cmd->base.duplex	= DUPLEX_FULL;
3561 	cmd->base.port		= PORT_TP;
3562 	cmd->base.phy_address	= 0;
3563 	cmd->base.autoneg	= AUTONEG_DISABLE;
3564 }
3565 
3566 static int tun_get_link_ksettings(struct net_device *dev,
3567 				  struct ethtool_link_ksettings *cmd)
3568 {
3569 	struct tun_struct *tun = netdev_priv(dev);
3570 
3571 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3572 	return 0;
3573 }
3574 
3575 static int tun_set_link_ksettings(struct net_device *dev,
3576 				  const struct ethtool_link_ksettings *cmd)
3577 {
3578 	struct tun_struct *tun = netdev_priv(dev);
3579 
3580 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3581 	return 0;
3582 }
3583 
3584 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3585 {
3586 	struct tun_struct *tun = netdev_priv(dev);
3587 
3588 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3589 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
3590 
3591 	switch (tun->flags & TUN_TYPE_MASK) {
3592 	case IFF_TUN:
3593 		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3594 		break;
3595 	case IFF_TAP:
3596 		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
3597 		break;
3598 	}
3599 }
3600 
3601 static u32 tun_get_msglevel(struct net_device *dev)
3602 {
3603 	struct tun_struct *tun = netdev_priv(dev);
3604 
3605 	return tun->msg_enable;
3606 }
3607 
3608 static void tun_set_msglevel(struct net_device *dev, u32 value)
3609 {
3610 	struct tun_struct *tun = netdev_priv(dev);
3611 
3612 	tun->msg_enable = value;
3613 }
3614 
3615 static int tun_get_coalesce(struct net_device *dev,
3616 			    struct ethtool_coalesce *ec,
3617 			    struct kernel_ethtool_coalesce *kernel_coal,
3618 			    struct netlink_ext_ack *extack)
3619 {
3620 	struct tun_struct *tun = netdev_priv(dev);
3621 
3622 	ec->rx_max_coalesced_frames = tun->rx_batched;
3623 
3624 	return 0;
3625 }
3626 
3627 static int tun_set_coalesce(struct net_device *dev,
3628 			    struct ethtool_coalesce *ec,
3629 			    struct kernel_ethtool_coalesce *kernel_coal,
3630 			    struct netlink_ext_ack *extack)
3631 {
3632 	struct tun_struct *tun = netdev_priv(dev);
3633 
3634 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3635 		tun->rx_batched = NAPI_POLL_WEIGHT;
3636 	else
3637 		tun->rx_batched = ec->rx_max_coalesced_frames;
3638 
3639 	return 0;
3640 }
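
/* "ethtool -C <dev> rx-frames N" lands here: the value is clamped to
 * NAPI_POLL_WEIGHT, and 0 disables batching so received packets are
 * delivered immediately instead of through tun_rx_batched()'s queue.
 */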
3641 
3642 static void tun_get_channels(struct net_device *dev,
3643 			     struct ethtool_channels *channels)
3644 {
3645 	struct tun_struct *tun = netdev_priv(dev);
3646 
3647 	channels->combined_count = tun->numqueues;
3648 	channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
3649 }
3650 
3651 static const struct ethtool_ops tun_ethtool_ops = {
3652 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3653 	.get_drvinfo	= tun_get_drvinfo,
3654 	.get_msglevel	= tun_get_msglevel,
3655 	.set_msglevel	= tun_set_msglevel,
3656 	.get_link	= ethtool_op_get_link,
3657 	.get_channels   = tun_get_channels,
3658 	.get_ts_info	= ethtool_op_get_ts_info,
3659 	.get_coalesce   = tun_get_coalesce,
3660 	.set_coalesce   = tun_set_coalesce,
3661 	.get_link_ksettings = tun_get_link_ksettings,
3662 	.set_link_ksettings = tun_set_link_ksettings,
3663 };
3664 
3665 static int tun_queue_resize(struct tun_struct *tun)
3666 {
3667 	struct net_device *dev = tun->dev;
3668 	struct tun_file *tfile;
3669 	struct ptr_ring **rings;
3670 	int n = tun->numqueues + tun->numdisabled;
3671 	int ret, i;
3672 
3673 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3674 	if (!rings)
3675 		return -ENOMEM;
3676 
3677 	for (i = 0; i < tun->numqueues; i++) {
3678 		tfile = rtnl_dereference(tun->tfiles[i]);
3679 		rings[i] = &tfile->tx_ring;
3680 	}
3681 	list_for_each_entry(tfile, &tun->disabled, next)
3682 		rings[i++] = &tfile->tx_ring;
3683 
3684 	ret = ptr_ring_resize_multiple(rings, n,
3685 				       dev->tx_queue_len, GFP_KERNEL,
3686 				       tun_ptr_free);
3687 
3688 	kfree(rings);
3689 	return ret;
3690 }
3691 
3692 static int tun_device_event(struct notifier_block *unused,
3693 			    unsigned long event, void *ptr)
3694 {
3695 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3696 	struct tun_struct *tun = netdev_priv(dev);
3697 	int i;
3698 
3699 	if (dev->rtnl_link_ops != &tun_link_ops)
3700 		return NOTIFY_DONE;
3701 
3702 	switch (event) {
3703 	case NETDEV_CHANGE_TX_QUEUE_LEN:
3704 		if (tun_queue_resize(tun))
3705 			return NOTIFY_BAD;
3706 		break;
3707 	case NETDEV_UP:
3708 		for (i = 0; i < tun->numqueues; i++) {
3709 			struct tun_file *tfile;
3710 
3711 			tfile = rtnl_dereference(tun->tfiles[i]);
3712 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3713 		}
3714 		break;
3715 	default:
3716 		break;
3717 	}
3718 
3719 	return NOTIFY_DONE;
3720 }
3721 
3722 static struct notifier_block tun_notifier_block __read_mostly = {
3723 	.notifier_call	= tun_device_event,
3724 };
3725 
3726 static int __init tun_init(void)
3727 {
3728 	int ret = 0;
3729 
3730 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3731 
3732 	ret = rtnl_link_register(&tun_link_ops);
3733 	if (ret) {
3734 		pr_err("Can't register link_ops\n");
3735 		goto err_linkops;
3736 	}
3737 
3738 	ret = misc_register(&tun_miscdev);
3739 	if (ret) {
3740 		pr_err("Can't register misc device %d\n", TUN_MINOR);
3741 		goto err_misc;
3742 	}
3743 
3744 	ret = register_netdevice_notifier(&tun_notifier_block);
3745 	if (ret) {
3746 		pr_err("Can't register netdevice notifier\n");
3747 		goto err_notifier;
3748 	}
3749 
3750 	return 0;
3751 
3752 err_notifier:
3753 	misc_deregister(&tun_miscdev);
3754 err_misc:
3755 	rtnl_link_unregister(&tun_link_ops);
3756 err_linkops:
3757 	return ret;
3758 }
3759 
3760 static void __exit tun_cleanup(void)
3761 {
3762 	misc_deregister(&tun_miscdev);
3763 	rtnl_link_unregister(&tun_link_ops);
3764 	unregister_netdevice_notifier(&tun_notifier_block);
3765 }
3766 
3767 /* Get an underlying socket object from tun file.  Returns error unless file is
3768  * attached to a device.  The returned object works like a packet socket, it
3769  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3770  * holding a reference to the file for as long as the socket is in use. */
3771 struct socket *tun_get_socket(struct file *file)
3772 {
3773 	struct tun_file *tfile;
3774 	if (file->f_op != &tun_fops)
3775 		return ERR_PTR(-EINVAL);
3776 	tfile = file->private_data;
3777 	if (!tfile)
3778 		return ERR_PTR(-EBADFD);
3779 	return &tfile->socket;
3780 }
3781 EXPORT_SYMBOL_GPL(tun_get_socket);
3782 
3783 struct ptr_ring *tun_get_tx_ring(struct file *file)
3784 {
3785 	struct tun_file *tfile;
3786 
3787 	if (file->f_op != &tun_fops)
3788 		return ERR_PTR(-EINVAL);
3789 	tfile = file->private_data;
3790 	if (!tfile)
3791 		return ERR_PTR(-EBADFD);
3792 	return &tfile->tx_ring;
3793 }
3794 EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3795 
3796 module_init(tun_init);
3797 module_exit(tun_cleanup);
3798 MODULE_DESCRIPTION(DRV_DESCRIPTION);
3799 MODULE_AUTHOR(DRV_COPYRIGHT);
3800 MODULE_LICENSE("GPL");
3801 MODULE_ALIAS_MISCDEV(TUN_MINOR);
3802 MODULE_ALIAS("devname:net/tun");
3803