xref: /linux/net/core/net_namespace.c (revision 1da91ea8)
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
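
/*
 * Editor's note (worked example, assuming a 64-bit build): struct
 * net_generic starts with a { len, rcu_head } header that takes up
 * three pointer-sized slots (one for the padded "len", two for the
 * 16-byte rcu_head), so MIN_PERNET_OPS_ID evaluates to 3 and the first
 * usable slot is ptr[3].  INITIAL_NET_GEN_PTRS == 13 therefore leaves
 * ten slots for pernet ids before net_assign_generic() has to
 * reallocate the array.
 */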

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

DEFINE_COOKIE(net_cookie);

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() reads the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
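
/*
 * Editor's sketch (assumption, not part of the original file): the read
 * side those notes refer to lives in <net/netns/generic.h>, where
 * net_generic() dereferences net->gen inside its own RCU read-side
 * section, roughly:
 *
 *	ng  = rcu_dereference(net->gen);
 *	ptr = ng->ptr[id];
 *
 * Since a published ->ptr[id] never changes and the superseded array is
 * only freed via kfree_rcu(), a reader sees either the old or the new
 * array, and both return the same pointer for an already-assigned id.
 */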

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive: this guarantees
	 * we never hash a peer back into net->netns_ids after it
	 * has just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock_bh(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock_bh(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
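
/*
 * Editor's sketch (hypothetical fragment modeled on rtnetlink users,
 * not from this file): dumpers typically call this to translate a peer
 * namespace into an NSID attribute, e.g.:
 *
 *	int nsid = peernet2id_alloc(dev_net(dev), peer_net, GFP_ATOMIC);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, nsid))
 *		goto nla_put_failure;
 *
 * A freshly allocated id is announced to RTNLGRP_NSID listeners via
 * rtnl_net_notifyid() before it is returned.
 */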

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);
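
/*
 * Editor's sketch (hypothetical caller): the returned peer, if any,
 * carries a reference that must be dropped with put_net():
 *
 *	struct net *peer = get_net_ns_by_id(net, nsid);
 *
 *	if (peer) {
 *		... use peer ...
 *		put_net(peer);
 *	}
 */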

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);
	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);
	int error = 0;

	preempt_disable();
	net->net_cookie = gen_cookie_next(&net_cookie);
	preempt_enable();

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	rtnl_lock();
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		kmem_cache_free(net_cachep, net);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_free(net);
}
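
/*
 * Editor's note (assumption drawn from the code above): a struct net is
 * pinned by two counters.  net->ns.count is the user-visible reference
 * taken by get_net()/put_net(); when it drops to zero, __put_net()
 * queues cleanup_net().  net->passive only keeps the memory itself
 * alive, so that RCU readers racing with teardown can still do:
 *
 *	if (maybe_get_net(peer)) {
 *		... peer is fully alive here ...
 *		put_net(peer);
 *	}
 *
 * net_free() releases the memory once the last passive reference is
 * gone.
 */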

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_free(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
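
/*
 * Editor's sketch (hypothetical caller): sysfs-facing code can use this
 * to chown per-netdevice objects to the namespace owner, e.g.:
 *
 *	kuid_t uid = GLOBAL_ROOT_UID;
 *	kgid_t gid = GLOBAL_ROOT_GID;
 *
 *	net_ns_get_ownership(dev_net(dev), &uid, &gid);
 *
 * with uid/gid then handed back from a ->get_ownership() hook.
 */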

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;

	/* This function is only called from cleanup_net() work,
	 * and that work is the only one that may delete a net
	 * from net_namespace_list. So, while the code below is
	 * executing, the list may only grow. Thus, we do not
	 * use for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we drop net_rwsem, no net newly
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()). So, we skip
	 * them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	synchronize_rcu_expedited();

	rtnl_lock();
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_free(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in a module exit path to make sure that all netns
 * ->exit ops have been invoked before the module code is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
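
/*
 * Editor's sketch (hypothetical module, all "foo" names made up): a
 * module whose pernet ->exit handlers are backed by module code can
 * drain an in-flight cleanup_net() before tearing its caches down:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *		net_ns_barrier();
 *		kmem_cache_destroy(foo_cache);
 *	}
 *
 * After net_ns_barrier() returns, every previously queued ->exit has
 * finished, so foo_cache can be destroyed safely.
 */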

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	struct fd f = fdget(fd);
	struct net *net = ERR_PTR(-EINVAL);

	if (!fd_file(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			net = get_net(container_of(ns, struct net, ns));
	}
	fdput(f);

	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
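
/*
 * Editor's sketch (hypothetical caller): the fd is expected to refer to
 * a /proc/<pid>/ns/net file (or a bind mount of one), e.g. when handed
 * over in an attribute like IFLA_NET_NS_FD:
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	... move an object into "net" ...
 *	put_net(net);
 */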
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Look up the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
}
#endif

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED |
		      RTNL_FLAG_DUMP_UNLOCKED);
}

static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
	ops_pre_exit_list(ops, net_exit_list);
	synchronize_rcu();

	if (ops->exit_batch_rtnl) {
		LIST_HEAD(dev_kill_list);

		rtnl_lock();
		ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
		unregister_netdevice_many(&dev_kill_list);
		rtnl_unlock();
	}
	ops_exit_list(ops, net_exit_list);

	ops_free_list(ops, net_exit_list);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all namespaces we initialized */
	list_del(&ops->list);
	free_exit_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	free_exit_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		free_exit_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to pair with
		 * the READ_ONCE in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
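
/*
 * Editor's sketch (hypothetical subsystem, every "foo" name is made
 * up): a typical user pairs ->id/->size, so ops_init() kzalloc()s the
 * per-netns state and publishes it via net_assign_generic() before
 * ->init runs:
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		struct list_head entries;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		INIT_LIST_HEAD(&fn->entries);
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * register_pernet_subsys(&foo_net_ops) then runs foo_net_init() for
 * every namespace that already exists, unregister_pernet_subsys()
 * undoes it, and the per-netns chunk itself is freed by
 * ops_free_list().
 */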

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
1482