// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/nospec.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
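
/*
 * Illustrative use (a sketch, not part of the upstream file): both hash
 * sizes can be overridden from the kernel command line at boot, e.g.
 *
 *	mhash_entries=1048576 mphash_entries=65536
 *
 * The __setup() handlers above only record the requested values; the
 * tables themselves are sized and allocated later, in mnt_init(),
 * which is outside this excerpt.
 */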

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static DEFINE_RWLOCK(mnt_ns_tree_lock);
static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e., during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
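
/*
 * Illustrative reader-side pattern (a sketch; lookup_mnt() below is the
 * canonical in-tree user). Readers sample the seqcount and retry if a
 * writer raced with them; writers take the write side through
 * lock_mount_hash()/unlock_mount_hash():
 *
 *	unsigned seq;
 *
 *	rcu_read_lock();
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		... read the mount hash / walk mnt_parent ...
 *	} while (read_seqretry(&mount_lock, seq));
 *	rcu_read_unlock();
 */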

static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns)
{
	u64 seq_b = ns->seq;

	if (seq < seq_b)
		return -1;
	if (seq > seq_b)
		return 1;
	return 0;
}

static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
	if (!node)
		return NULL;
	return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
}

static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b)
{
	struct mnt_namespace *ns_a = node_to_mnt_ns(a);
	struct mnt_namespace *ns_b = node_to_mnt_ns(b);
	u64 seq_a = ns_a->seq;

	return mnt_ns_cmp(seq_a, ns_b) < 0;
}

static void mnt_ns_tree_add(struct mnt_namespace *ns)
{
	guard(write_lock)(&mnt_ns_tree_lock);
	rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less);
}

static void mnt_ns_release(struct mnt_namespace *ns)
{
	lockdep_assert_not_held(&mnt_ns_tree_lock);

	/* keep alive for {list,stat}mount() */
	if (refcount_dec_and_test(&ns->passive)) {
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (!is_anon_ns(ns)) {
		guard(write_lock)(&mnt_ns_tree_lock);
		rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
	}

	mnt_ns_release(ns);
}

/*
 * Returns the mount namespace which either has the specified id, or has the
 * smallest id greater than the specified one.
 */
static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
{
	struct rb_node *node = mnt_ns_tree.rb_node;
	struct mnt_namespace *ret = NULL;

	lockdep_assert_held(&mnt_ns_tree_lock);

	while (node) {
		struct mnt_namespace *n = node_to_mnt_ns(node);

		if (mnt_ns_id <= n->seq) {
			ret = node_to_mnt_ns(node);
			if (mnt_ns_id == n->seq)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if, e.g., the
 * last task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *ns;

	guard(read_lock)(&mnt_ns_tree_lock);
	ns = mnt_ns_find_id_at(mnt_ns_id);
	if (!ns || ns->seq != mnt_ns_id)
		return NULL;

	refcount_inc(&ns->passive);
	return ns;
}
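
/*
 * Illustrative caller pattern (a sketch; {list,stat}mount() are the
 * intended users). The DEFINE_FREE() above also permits the scope-based
 * cleanup form from <linux/cleanup.h>:
 *
 *	struct mnt_namespace *ns __free(mnt_ns_release) = lookup_mnt_ns(id);
 *
 *	if (!ns)
 *		return -ENOENT;
 *	... walk ns->mounts with namespace_sem held ...
 */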

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store done by mnt_inc_writers() must be visible before we enter
	 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with a remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
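
/*
 * Typical caller pattern (an illustrative sketch, not taken from this
 * file; "path" stands for any struct path the caller holds):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... modify the filesystem ...
 *	mnt_drop_write(path->mnt);
 */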
522
523 /**
524 * mnt_get_write_access_file - get write access to a file's mount
525 * @file: the file who's mount on which to take a write
526 *
527 * This is like mnt_get_write_access, but if @file is already open for write it
528 * skips incrementing mnt_writers (since the open file already has a reference)
529 * and instead only does the check for emergency r/o remounts. This must be
530 * paired with mnt_put_write_access_file.
531 */
mnt_get_write_access_file(struct file * file)532 int mnt_get_write_access_file(struct file *file)
533 {
534 if (file->f_mode & FMODE_WRITER) {
535 /*
536 * Superblock may have become readonly while there are still
537 * writable fd's, e.g. due to a fs error with errors=remount-ro
538 */
539 if (__mnt_is_readonly(file->f_path.mnt))
540 return -EROFS;
541 return 0;
542 }
543 return mnt_get_write_access(file->f_path.mnt);
544 }
545
546 /**
547 * mnt_want_write_file - get write access to a file's mount
548 * @file: the file who's mount on which to take a write
549 *
550 * This is like mnt_want_write, but if the file is already open for writing it
551 * skips incrementing mnt_writers (since the open file already has a reference)
552 * and instead only does the freeze protection and the check for emergency r/o
553 * remounts. This must be paired with mnt_drop_write_file.
554 */
mnt_want_write_file(struct file * file)555 int mnt_want_write_file(struct file *file)
556 {
557 int ret;
558
559 sb_start_write(file_inode(file)->i_sb);
560 ret = mnt_get_write_access_file(file);
561 if (ret)
562 sb_end_write(file_inode(file)->i_sb);
563 return ret;
564 }
565 EXPORT_SYMBOL_GPL(mnt_want_write_file);
566
567 /**
568 * mnt_put_write_access - give up write access to a mount
569 * @mnt: the mount on which to give up write access
570 *
571 * Tells the low-level filesystem that we are done
572 * performing writes to it. Must be matched with
573 * mnt_get_write_access() call above.
574 */
mnt_put_write_access(struct vfsmount * mnt)575 void mnt_put_write_access(struct vfsmount *mnt)
576 {
577 preempt_disable();
578 mnt_dec_writers(real_mount(mnt));
579 preempt_enable();
580 }
581 EXPORT_SYMBOL_GPL(mnt_put_write_access);
582
583 /**
584 * mnt_drop_write - give up write access to a mount
585 * @mnt: the mount on which to give up write access
586 *
587 * Tells the low-level filesystem that we are done performing writes to it and
588 * also allows filesystem to be frozen again. Must be matched with
589 * mnt_want_write() call above.
590 */
mnt_drop_write(struct vfsmount * mnt)591 void mnt_drop_write(struct vfsmount *mnt)
592 {
593 mnt_put_write_access(mnt);
594 sb_end_write(mnt->mnt_sb);
595 }
596 EXPORT_SYMBOL_GPL(mnt_drop_write);
597
mnt_put_write_access_file(struct file * file)598 void mnt_put_write_access_file(struct file *file)
599 {
600 if (!(file->f_mode & FMODE_WRITER))
601 mnt_put_write_access(file->f_path.mnt);
602 }
603
mnt_drop_write_file(struct file * file)604 void mnt_drop_write_file(struct file *file)
605 {
606 mnt_put_write_access_file(file);
607 sb_end_write(file_inode(file)->i_sb);
608 }
609 EXPORT_SYMBOL(mnt_drop_write_file);
610
611 /**
612 * mnt_hold_writers - prevent write access to the given mount
613 * @mnt: mnt to prevent write access to
614 *
615 * Prevents write access to @mnt if there are no active writers for @mnt.
616 * This function needs to be called and return successfully before changing
617 * properties of @mnt that need to remain stable for callers with write access
618 * to @mnt.
619 *
620 * After this functions has been called successfully callers must pair it with
621 * a call to mnt_unhold_writers() in order to stop preventing write access to
622 * @mnt.
623 *
624 * Context: This function expects lock_mount_hash() to be held serializing
625 * setting MNT_WRITE_HOLD.
626 * Return: On success 0 is returned.
627 * On error, -EBUSY is returned.
628 */
mnt_hold_writers(struct mount * mnt)629 static inline int mnt_hold_writers(struct mount *mnt)
630 {
631 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
632 /*
633 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
634 * should be visible before we do.
635 */
636 smp_mb();
637
638 /*
639 * With writers on hold, if this value is zero, then there are
640 * definitely no active writers (although held writers may subsequently
641 * increment the count, they'll have to wait, and decrement it after
642 * seeing MNT_READONLY).
643 *
644 * It is OK to have counter incremented on one CPU and decremented on
645 * another: the sum will add up correctly. The danger would be when we
646 * sum up each counter, if we read a counter before it is incremented,
647 * but then read another CPU's count which it has been subsequently
648 * decremented from -- we would see more decrements than we should.
649 * MNT_WRITE_HOLD protects against this scenario, because
650 * mnt_want_write first increments count, then smp_mb, then spins on
651 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
652 * we're counting up here.
653 */
654 if (mnt_get_writers(mnt) > 0)
655 return -EBUSY;
656
657 return 0;
658 }
659
660 /**
661 * mnt_unhold_writers - stop preventing write access to the given mount
662 * @mnt: mnt to stop preventing write access to
663 *
664 * Stop preventing write access to @mnt allowing callers to gain write access
665 * to @mnt again.
666 *
667 * This function can only be called after a successful call to
668 * mnt_hold_writers().
669 *
670 * Context: This function expects lock_mount_hash() to be held.
671 */
mnt_unhold_writers(struct mount * mnt)672 static inline void mnt_unhold_writers(struct mount *mnt)
673 {
674 /*
675 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
676 * that become unheld will see MNT_READONLY.
677 */
678 smp_wmb();
679 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
680 }
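
/*
 * Illustrative pairing of the two helpers above (a sketch;
 * mnt_make_readonly() below is a real in-tree user):
 *
 *	lock_mount_hash();
 *	ret = mnt_hold_writers(mnt);
 *	if (!ret)
 *		... change properties writers must not observe ...
 *	mnt_unhold_writers(mnt);
 *	unlock_mount_hash();
 */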

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();		// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - find first child mount
 * @mnt: parent mount
 * @dentry: mountpoint
 *
 * If @mnt has a child mount @c mounted at @dentry, find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted at @dentry, or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 * current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;
	bool is_covered = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt: the mount beneath which @new_parent is mounted
 * @new_mp: the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent: the parent
 * @mnt: the new mount
 * @mp: the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mount_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}

static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);
	mnt->mnt.mnt_flags |= MNT_ONRB;
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	while (!list_empty(&head)) {
		m = list_first_entry(&head, typeof(*m), mnt_list);
		list_del(&m->mnt_list);

		mnt_add_to_ns(n, m);
	}
	n->nr_mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
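
/*
 * Illustrative traversal (a sketch): next_mnt() yields a depth-first
 * walk of the mount tree rooted at its second argument, so whole-tree
 * visitors below (e.g. may_umount_tree(), umount_tree()) are written
 * as:
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		... visit p ...
 */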

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
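
/*
 * Illustrative kernel-internal use (a sketch; "my_fs_type" is a made-up
 * file_system_type, and real callers check the result):
 *
 *	struct vfsmount *mnt;
 *
 *	mnt = vfs_kern_mount(&my_fs_type, SB_KERNMOUNT, "myfs", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 */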

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
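
/*
 * Reference pairing sketch (illustrative): every mntget() must
 * eventually be balanced by an mntput():
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	... use m, possibly past the lifetime of "path" ...
 *	mntput(m);
 */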
1481
1482 /*
1483 * Make a mount point inaccessible to new lookups.
1484 * Because there may still be current users, the caller MUST WAIT
1485 * for an RCU grace period before destroying the mount point.
1486 */
mnt_make_shortterm(struct vfsmount * mnt)1487 void mnt_make_shortterm(struct vfsmount *mnt)
1488 {
1489 if (mnt)
1490 real_mount(mnt)->mnt_ns = NULL;
1491 }
1492
1493 /**
1494 * path_is_mountpoint() - Check if path is a mount in the current namespace.
1495 * @path: path to check
1496 *
1497 * d_mountpoint() can only be used reliably to establish if a dentry is
1498 * not mounted in any namespace and that common case is handled inline.
1499 * d_mountpoint() isn't aware of the possibility there may be multiple
1500 * mounts using a given dentry in a different namespace. This function
1501 * checks if the passed in path is a mountpoint rather than the dentry
1502 * alone.
1503 */
path_is_mountpoint(const struct path * path)1504 bool path_is_mountpoint(const struct path *path)
1505 {
1506 unsigned seq;
1507 bool res;
1508
1509 if (!d_mountpoint(path->dentry))
1510 return false;
1511
1512 rcu_read_lock();
1513 do {
1514 seq = read_seqbegin(&mount_lock);
1515 res = __path_is_mountpoint(path);
1516 } while (read_seqretry(&mount_lock, seq));
1517 rcu_read_unlock();
1518
1519 return res;
1520 }
1521 EXPORT_SYMBOL(path_is_mountpoint);
1522
mnt_clone_internal(const struct path * path)1523 struct vfsmount *mnt_clone_internal(const struct path *path)
1524 {
1525 struct mount *p;
1526 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1527 if (IS_ERR(p))
1528 return ERR_CAST(p);
1529 p->mnt.mnt_flags |= MNT_INTERNAL;
1530 return &p->mnt;
1531 }
1532
1533 /*
1534 * Returns the mount which either has the specified mnt_id, or has the next
1535 * smallest id afer the specified one.
1536 */
mnt_find_id_at(struct mnt_namespace * ns,u64 mnt_id)1537 static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
1538 {
1539 struct rb_node *node = ns->mounts.rb_node;
1540 struct mount *ret = NULL;
1541
1542 while (node) {
1543 struct mount *m = node_to_mount(node);
1544
1545 if (mnt_id <= m->mnt_id_unique) {
1546 ret = node_to_mount(node);
1547 if (mnt_id == m->mnt_id_unique)
1548 break;
1549 node = node->rb_left;
1550 } else {
1551 node = node->rb_right;
1552 }
1553 }
1554 return ret;
1555 }
1556
1557 /*
1558 * Returns the mount which either has the specified mnt_id, or has the next
1559 * greater id before the specified one.
1560 */
mnt_find_id_at_reverse(struct mnt_namespace * ns,u64 mnt_id)1561 static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
1562 {
1563 struct rb_node *node = ns->mounts.rb_node;
1564 struct mount *ret = NULL;
1565
1566 while (node) {
1567 struct mount *m = node_to_mount(node);
1568
1569 if (mnt_id >= m->mnt_id_unique) {
1570 ret = node_to_mount(node);
1571 if (mnt_id == m->mnt_id_unique)
1572 break;
1573 node = node->rb_right;
1574 } else {
1575 node = node->rb_left;
1576 }
1577 }
1578 return ret;
1579 }
1580
1581 #ifdef CONFIG_PROC_FS
1582
1583 /* iterator; we want it to have access to namespace_sem, thus here... */
m_start(struct seq_file * m,loff_t * pos)1584 static void *m_start(struct seq_file *m, loff_t *pos)
1585 {
1586 struct proc_mounts *p = m->private;
1587
1588 down_read(&namespace_sem);
1589
1590 return mnt_find_id_at(p->ns, *pos);
1591 }
1592
m_next(struct seq_file * m,void * v,loff_t * pos)1593 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1594 {
1595 struct mount *next = NULL, *mnt = v;
1596 struct rb_node *node = rb_next(&mnt->mnt_node);
1597
1598 ++*pos;
1599 if (node) {
1600 next = node_to_mount(node);
1601 *pos = next->mnt_id_unique;
1602 }
1603 return next;
1604 }
1605
m_stop(struct seq_file * m,void * v)1606 static void m_stop(struct seq_file *m, void *v)
1607 {
1608 up_read(&namespace_sem);
1609 }
1610
m_show(struct seq_file * m,void * v)1611 static int m_show(struct seq_file *m, void *v)
1612 {
1613 struct proc_mounts *p = m->private;
1614 struct mount *r = v;
1615 return p->show(m, &r->mnt);
1616 }
1617
1618 const struct seq_operations mounts_op = {
1619 .start = m_start,
1620 .next = m_next,
1621 .stop = m_stop,
1622 .show = m_show,
1623 };
1624
1625 #endif /* CONFIG_PROC_FS */
1626
1627 /**
1628 * may_umount_tree - check if a mount tree is busy
1629 * @m: root of mount tree
1630 *
1631 * This is called to check if a tree of mounts has any
1632 * open files, pwds, chroots or sub mounts that are
1633 * busy.
1634 */
may_umount_tree(struct vfsmount * m)1635 int may_umount_tree(struct vfsmount *m)
1636 {
1637 struct mount *mnt = real_mount(m);
1638 int actual_refs = 0;
1639 int minimum_refs = 0;
1640 struct mount *p;
1641 BUG_ON(!m);
1642
1643 /* write lock needed for mnt_get_count */
1644 lock_mount_hash();
1645 for (p = mnt; p; p = next_mnt(p, mnt)) {
1646 actual_refs += mnt_get_count(p);
1647 minimum_refs += 2;
1648 }
1649 unlock_mount_hash();
1650
1651 if (actual_refs > minimum_refs)
1652 return 0;
1653
1654 return 1;
1655 }
1656
1657 EXPORT_SYMBOL(may_umount_tree);
1658
1659 /**
1660 * may_umount - check if a mount point is busy
1661 * @mnt: root of mount
1662 *
1663 * This is called to check if a mount point has any
1664 * open files, pwds, chroots or sub mounts. If the
1665 * mount has sub mounts this will return busy
1666 * regardless of whether the sub mounts are busy.
1667 *
1668 * Doesn't take quota and stuff into account. IOW, in some cases it will
1669 * give false negatives. The main reason why it's here is that we need
1670 * a non-destructive way to look for easily umountable filesystems.
1671 */
may_umount(struct vfsmount * mnt)1672 int may_umount(struct vfsmount *mnt)
1673 {
1674 int ret = 1;
1675 down_read(&namespace_sem);
1676 lock_mount_hash();
1677 if (propagate_mount_busy(real_mount(mnt), 2))
1678 ret = 0;
1679 unlock_mount_hash();
1680 up_read(&namespace_sem);
1681 return ret;
1682 }
1683
1684 EXPORT_SYMBOL(may_umount);
1685
namespace_unlock(void)1686 static void namespace_unlock(void)
1687 {
1688 struct hlist_head head;
1689 struct hlist_node *p;
1690 struct mount *m;
1691 LIST_HEAD(list);
1692
1693 hlist_move_list(&unmounted, &head);
1694 list_splice_init(&ex_mountpoints, &list);
1695
1696 up_write(&namespace_sem);
1697
1698 shrink_dentry_list(&list);
1699
1700 if (likely(hlist_empty(&head)))
1701 return;
1702
1703 synchronize_rcu_expedited();
1704
1705 hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
1706 hlist_del(&m->mnt_umount);
1707 mntput(&m->mnt);
1708 }
1709 }
1710
namespace_lock(void)1711 static inline void namespace_lock(void)
1712 {
1713 down_write(&namespace_sem);
1714 }
1715
1716 enum umount_tree_flags {
1717 UMOUNT_SYNC = 1,
1718 UMOUNT_PROPAGATE = 2,
1719 UMOUNT_CONNECTED = 4,
1720 };
1721
disconnect_mount(struct mount * mnt,enum umount_tree_flags how)1722 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1723 {
1724 /* Leaving mounts connected is only valid for lazy umounts */
1725 if (how & UMOUNT_SYNC)
1726 return true;
1727
1728 /* A mount without a parent has nothing to be connected to */
1729 if (!mnt_has_parent(mnt))
1730 return true;
1731
1732 /* Because the reference counting rules change when mounts are
1733 * unmounted and connected, umounted mounts may not be
1734 * connected to mounted mounts.
1735 */
1736 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1737 return true;
1738
1739 /* Has it been requested that the mount remain connected? */
1740 if (how & UMOUNT_CONNECTED)
1741 return false;
1742
1743 /* Is the mount locked such that it needs to remain connected? */
1744 if (IS_MNT_LOCKED(mnt))
1745 return false;
1746
1747 /* By default disconnect the mount */
1748 return true;
1749 }
1750
1751 /*
1752 * mount_lock must be held
1753 * namespace_sem must be held for write
1754 */
umount_tree(struct mount * mnt,enum umount_tree_flags how)1755 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1756 {
1757 LIST_HEAD(tmp_list);
1758 struct mount *p;
1759
1760 if (how & UMOUNT_PROPAGATE)
1761 propagate_mount_unlock(mnt);
1762
1763 /* Gather the mounts to umount */
1764 for (p = mnt; p; p = next_mnt(p, mnt)) {
1765 p->mnt.mnt_flags |= MNT_UMOUNT;
1766 if (p->mnt.mnt_flags & MNT_ONRB)
1767 move_from_ns(p, &tmp_list);
1768 else
1769 list_move(&p->mnt_list, &tmp_list);
1770 }
1771
1772 /* Hide the mounts from mnt_mounts */
1773 list_for_each_entry(p, &tmp_list, mnt_list) {
1774 list_del_init(&p->mnt_child);
1775 }
1776
1777 /* Add propogated mounts to the tmp_list */
1778 if (how & UMOUNT_PROPAGATE)
1779 propagate_umount(&tmp_list);
1780
1781 while (!list_empty(&tmp_list)) {
1782 struct mnt_namespace *ns;
1783 bool disconnect;
1784 p = list_first_entry(&tmp_list, struct mount, mnt_list);
1785 list_del_init(&p->mnt_expire);
1786 list_del_init(&p->mnt_list);
1787 ns = p->mnt_ns;
1788 if (ns) {
1789 ns->nr_mounts--;
1790 __touch_mnt_namespace(ns);
1791 }
1792 p->mnt_ns = NULL;
1793 if (how & UMOUNT_SYNC)
1794 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1795
1796 disconnect = disconnect_mount(p, how);
1797 if (mnt_has_parent(p)) {
1798 mnt_add_count(p->mnt_parent, -1);
1799 if (!disconnect) {
1800 /* Don't forget about p */
1801 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
1802 } else {
1803 umount_mnt(p);
1804 }
1805 }
1806 change_mnt_propagation(p, MS_PRIVATE);
1807 if (disconnect)
1808 hlist_add_head(&p->mnt_umount, &unmounted);
1809 }
1810 }
1811
1812 static void shrink_submounts(struct mount *mnt);
1813
do_umount_root(struct super_block * sb)1814 static int do_umount_root(struct super_block *sb)
1815 {
1816 int ret = 0;
1817
1818 down_write(&sb->s_umount);
1819 if (!sb_rdonly(sb)) {
1820 struct fs_context *fc;
1821
1822 fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
1823 SB_RDONLY);
1824 if (IS_ERR(fc)) {
1825 ret = PTR_ERR(fc);
1826 } else {
1827 ret = parse_monolithic_mount_data(fc, NULL);
1828 if (!ret)
1829 ret = reconfigure_super(fc);
1830 put_fs_context(fc);
1831 }
1832 }
1833 up_write(&sb->s_umount);
1834 return ret;
1835 }
1836
do_umount(struct mount * mnt,int flags)1837 static int do_umount(struct mount *mnt, int flags)
1838 {
1839 struct super_block *sb = mnt->mnt.mnt_sb;
1840 int retval;
1841
1842 retval = security_sb_umount(&mnt->mnt, flags);
1843 if (retval)
1844 return retval;
1845
1846 /*
1847 * Allow userspace to request a mountpoint be expired rather than
1848 * unmounting unconditionally. Unmount only happens if:
1849 * (1) the mark is already set (the mark is cleared by mntput())
1850 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1851 */
1852 if (flags & MNT_EXPIRE) {
1853 if (&mnt->mnt == current->fs->root.mnt ||
1854 flags & (MNT_FORCE | MNT_DETACH))
1855 return -EINVAL;
1856
1857 /*
1858 * probably don't strictly need the lock here if we examined
1859 * all race cases, but it's a slowpath.
1860 */
1861 lock_mount_hash();
1862 if (mnt_get_count(mnt) != 2) {
1863 unlock_mount_hash();
1864 return -EBUSY;
1865 }
1866 unlock_mount_hash();
1867
1868 if (!xchg(&mnt->mnt_expiry_mark, 1))
1869 return -EAGAIN;
1870 }
1871
1872 /*
1873 * If we may have to abort operations to get out of this
1874 * mount, and they will themselves hold resources, we must
1875 * allow the fs to do things. In the Unix tradition of
1876 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
1877 * might fail to complete on the first run through, as other tasks
1878 * must return first, and the like. That's for the mount program to
1879 * worry about for the moment.
1880 */
1881
1882 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1883 sb->s_op->umount_begin(sb);
1884 }
1885
1886 /*
1887 * No sense in grabbing the lock for this test, but the test itself
1888 * looks somewhat bogus. Suggestions for a better replacement?
1889 * Ho-hum... In principle, we might treat that as umount + switch
1890 * to rootfs. GC would eventually take care of the old vfsmount.
1891 * Actually it makes sense, especially if rootfs would contain a
1892 * /reboot - static binary that would close all descriptors and
1893 * call reboot(9). Then init(8) could umount root and exec /reboot.
1894 */
1895 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1896 /*
1897 * Special case for "unmounting" root ...
1898 * we just try to remount it readonly.
1899 */
1900 if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
1901 return -EPERM;
1902 return do_umount_root(sb);
1903 }
1904
1905 namespace_lock();
1906 lock_mount_hash();
1907
1908 /* Recheck MNT_LOCKED with the locks held */
1909 retval = -EINVAL;
1910 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1911 goto out;
1912
1913 event++;
1914 if (flags & MNT_DETACH) {
1915 if (mnt->mnt.mnt_flags & MNT_ONRB ||
1916 !list_empty(&mnt->mnt_list))
1917 umount_tree(mnt, UMOUNT_PROPAGATE);
1918 retval = 0;
1919 } else {
1920 shrink_submounts(mnt);
1921 retval = -EBUSY;
1922 if (!propagate_mount_busy(mnt, 2)) {
1923 if (mnt->mnt.mnt_flags & MNT_ONRB ||
1924 !list_empty(&mnt->mnt_list))
1925 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1926 retval = 0;
1927 }
1928 }
1929 out:
1930 unlock_mount_hash();
1931 namespace_unlock();
1932 return retval;
1933 }
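/*
 * Illustrative userspace sketch (not part of the original file) of the
 * two-step MNT_EXPIRE protocol implemented above, assuming the glibc
 * umount2(2) wrapper: the first call only sets the expiry mark and
 * fails with EAGAIN; a later call succeeds if nothing touched the
 * mount in between.
 *
 *	#include <sys/mount.h>
 *	#include <errno.h>
 *
 *	static int try_expire(const char *mountpoint)
 *	{
 *		if (umount2(mountpoint, MNT_EXPIRE) == 0)
 *			return 0;	// was marked and still unused: gone
 *		if (errno == EAGAIN)
 *			return 1;	// mark set now; try again later
 *		return -1;		// EBUSY or another error
 *	}
 */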
1934
1935 /*
1936 * __detach_mounts - lazily unmount all mounts on the specified dentry
1937 *
1938 * During unlink, rmdir, and d_drop it is possible to lose the path
1939 * to an existing mountpoint, and wind up leaking the mount.
1940 * detach_mounts allows lazily unmounting those mounts instead of
1941 * leaking them.
1942 *
1943 * The caller may hold dentry->d_inode->i_mutex.
1944 */
1945 void __detach_mounts(struct dentry *dentry)
1946 {
1947 struct mountpoint *mp;
1948 struct mount *mnt;
1949
1950 namespace_lock();
1951 lock_mount_hash();
1952 mp = lookup_mountpoint(dentry);
1953 if (!mp)
1954 goto out_unlock;
1955
1956 event++;
1957 while (!hlist_empty(&mp->m_list)) {
1958 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1959 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1960 umount_mnt(mnt);
1961 hlist_add_head(&mnt->mnt_umount, &unmounted);
1962 }
1963 else umount_tree(mnt, UMOUNT_CONNECTED);
1964 }
1965 put_mountpoint(mp);
1966 out_unlock:
1967 unlock_mount_hash();
1968 namespace_unlock();
1969 }
1970
1971 /*
1972 * Is the caller allowed to modify his namespace?
1973 */
1974 bool may_mount(void)
1975 {
1976 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1977 }
1978
1979 static void warn_mandlock(void)
1980 {
1981 pr_warn_once("=======================================================\n"
1982 "WARNING: The mand mount option has been deprecated and\n"
1983 " and is ignored by this kernel. Remove the mand\n"
1984 " option from the mount to silence this warning.\n"
1985 "=======================================================\n");
1986 }
1987
1988 static int can_umount(const struct path *path, int flags)
1989 {
1990 struct mount *mnt = real_mount(path->mnt);
1991
1992 if (!may_mount())
1993 return -EPERM;
1994 if (!path_mounted(path))
1995 return -EINVAL;
1996 if (!check_mnt(mnt))
1997 return -EINVAL;
1998 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1999 return -EINVAL;
2000 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
2001 return -EPERM;
2002 return 0;
2003 }
2004
2005 // caller is responsible for flags being sane
2006 int path_umount(struct path *path, int flags)
2007 {
2008 struct mount *mnt = real_mount(path->mnt);
2009 int ret;
2010
2011 ret = can_umount(path, flags);
2012 if (!ret)
2013 ret = do_umount(mnt, flags);
2014
2015 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
2016 dput(path->dentry);
2017 mntput_no_expire(mnt);
2018 return ret;
2019 }
2020
2021 static int ksys_umount(char __user *name, int flags)
2022 {
2023 int lookup_flags = LOOKUP_MOUNTPOINT;
2024 struct path path;
2025 int ret;
2026
2027 // basic validity checks done first
2028 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
2029 return -EINVAL;
2030
2031 if (!(flags & UMOUNT_NOFOLLOW))
2032 lookup_flags |= LOOKUP_FOLLOW;
2033 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
2034 if (ret)
2035 return ret;
2036 return path_umount(&path, flags);
2037 }
2038
2039 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
2040 {
2041 return ksys_umount(name, flags);
2042 }
2043
2044 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
2045
2046 /*
2047 * The 2.0 compatible umount. No flags.
2048 */
2049 SYSCALL_DEFINE1(oldumount, char __user *, name)
2050 {
2051 return ksys_umount(name, 0);
2052 }
2053
2054 #endif
2055
2056 static bool is_mnt_ns_file(struct dentry *dentry)
2057 {
2058 /* Is this a proxy for a mount namespace? */
2059 return dentry->d_op == &ns_dentry_operations &&
2060 dentry->d_fsdata == &mntns_operations;
2061 }
2062
2063 static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
2064 {
2065 return container_of(ns, struct mnt_namespace, ns);
2066 }
2067
2068 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
2069 {
2070 return &mnt->ns;
2071 }
2072
2073 static bool mnt_ns_loop(struct dentry *dentry)
2074 {
2075 /* Could bind mounting the mount namespace inode cause a
2076 * mount namespace loop?
2077 */
2078 struct mnt_namespace *mnt_ns;
2079 if (!is_mnt_ns_file(dentry))
2080 return false;
2081
2082 mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
2083 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
2084 }
2085
2086 struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
2087 int flag)
2088 {
2089 struct mount *res, *src_parent, *src_root_child, *src_mnt,
2090 *dst_parent, *dst_mnt;
2091
2092 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
2093 return ERR_PTR(-EINVAL);
2094
2095 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
2096 return ERR_PTR(-EINVAL);
2097
2098 res = dst_mnt = clone_mnt(src_root, dentry, flag);
2099 if (IS_ERR(dst_mnt))
2100 return dst_mnt;
2101
2102 src_parent = src_root;
2103 dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;
2104
2105 list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
2106 if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
2107 continue;
2108
2109 for (src_mnt = src_root_child; src_mnt;
2110 src_mnt = next_mnt(src_mnt, src_root_child)) {
2111 if (!(flag & CL_COPY_UNBINDABLE) &&
2112 IS_MNT_UNBINDABLE(src_mnt)) {
2113 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
2114 /* Both unbindable and locked. */
2115 dst_mnt = ERR_PTR(-EPERM);
2116 goto out;
2117 } else {
2118 src_mnt = skip_mnt_tree(src_mnt);
2119 continue;
2120 }
2121 }
2122 if (!(flag & CL_COPY_MNT_NS_FILE) &&
2123 is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
2124 src_mnt = skip_mnt_tree(src_mnt);
2125 continue;
2126 }
2127 while (src_parent != src_mnt->mnt_parent) {
2128 src_parent = src_parent->mnt_parent;
2129 dst_mnt = dst_mnt->mnt_parent;
2130 }
2131
2132 src_parent = src_mnt;
2133 dst_parent = dst_mnt;
2134 dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
2135 if (IS_ERR(dst_mnt))
2136 goto out;
2137 lock_mount_hash();
2138 list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
2139 attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
2140 unlock_mount_hash();
2141 }
2142 }
2143 return res;
2144
2145 out:
2146 if (res) {
2147 lock_mount_hash();
2148 umount_tree(res, UMOUNT_SYNC);
2149 unlock_mount_hash();
2150 }
2151 return dst_mnt;
2152 }
2153
2154 /* Caller should check returned pointer for errors */
2155
2156 struct vfsmount *collect_mounts(const struct path *path)
2157 {
2158 struct mount *tree;
2159 namespace_lock();
2160 if (!check_mnt(real_mount(path->mnt)))
2161 tree = ERR_PTR(-EINVAL);
2162 else
2163 tree = copy_tree(real_mount(path->mnt), path->dentry,
2164 CL_COPY_ALL | CL_PRIVATE);
2165 namespace_unlock();
2166 if (IS_ERR(tree))
2167 return ERR_CAST(tree);
2168 return &tree->mnt;
2169 }
2170
2171 static void free_mnt_ns(struct mnt_namespace *);
2172 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
2173
2174 void dissolve_on_fput(struct vfsmount *mnt)
2175 {
2176 struct mnt_namespace *ns;
2177 namespace_lock();
2178 lock_mount_hash();
2179 ns = real_mount(mnt)->mnt_ns;
2180 if (ns) {
2181 if (is_anon_ns(ns))
2182 umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
2183 else
2184 ns = NULL;
2185 }
2186 unlock_mount_hash();
2187 namespace_unlock();
2188 if (ns)
2189 free_mnt_ns(ns);
2190 }
2191
2192 void drop_collected_mounts(struct vfsmount *mnt)
2193 {
2194 namespace_lock();
2195 lock_mount_hash();
2196 umount_tree(real_mount(mnt), 0);
2197 unlock_mount_hash();
2198 namespace_unlock();
2199 }
2200
2201 bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2202 {
2203 struct mount *child;
2204
2205 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2206 if (!is_subdir(child->mnt_mountpoint, dentry))
2207 continue;
2208
2209 if (child->mnt.mnt_flags & MNT_LOCKED)
2210 return true;
2211 }
2212 return false;
2213 }
2214
2215 /**
2216 * clone_private_mount - create a private clone of a path
2217 * @path: path to clone
2218 *
2219 * This creates a new vfsmount, which will be the clone of @path. The new mount
2220 * will not be attached anywhere in the namespace and will be private (i.e.
2221 * changes to the originating mount won't be propagated into this).
2222 *
2223 * Release with mntput().
2224 */
2225 struct vfsmount *clone_private_mount(const struct path *path)
2226 {
2227 struct mount *old_mnt = real_mount(path->mnt);
2228 struct mount *new_mnt;
2229
2230 down_read(&namespace_sem);
2231 if (IS_MNT_UNBINDABLE(old_mnt))
2232 goto invalid;
2233
2234 if (!check_mnt(old_mnt))
2235 goto invalid;
2236
2237 if (has_locked_children(old_mnt, path->dentry))
2238 goto invalid;
2239
2240 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
2241 up_read(&namespace_sem);
2242
2243 if (IS_ERR(new_mnt))
2244 return ERR_CAST(new_mnt);
2245
2246 /* Longterm mount to be removed by kern_unmount*() */
2247 new_mnt->mnt_ns = MNT_NS_INTERNAL;
2248
2249 return &new_mnt->mnt;
2250
2251 invalid:
2252 up_read(&namespace_sem);
2253 return ERR_PTR(-EINVAL);
2254 }
2255 EXPORT_SYMBOL_GPL(clone_private_mount);
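/*
 * Minimal in-kernel usage sketch (illustrative; the helper name and
 * the caller resolving a struct path are assumptions): the clone is
 * private and unattached, so changes to the source do not propagate
 * into it, and it is released with mntput().
 *
 *	static struct vfsmount *grab_layer(const struct path *layer)
 *	{
 *		struct vfsmount *m = clone_private_mount(layer);
 *
 *		if (IS_ERR(m))		// unbindable or locked trees fail
 *			return m;
 *		// ... use m->mnt_root, m->mnt_sb ...
 *		return m;		// caller eventually does mntput(m)
 *	}
 */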
2256
2257 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
2258 struct vfsmount *root)
2259 {
2260 struct mount *mnt;
2261 int res = f(root, arg);
2262 if (res)
2263 return res;
2264 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
2265 res = f(&mnt->mnt, arg);
2266 if (res)
2267 return res;
2268 }
2269 return 0;
2270 }
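/*
 * Illustrative sketch of the collect/iterate/drop trio above (the
 * callback and helper names are made up): take a private snapshot of
 * the tree at @path, walk it, then dissolve the snapshot.
 *
 *	static int count_one(struct vfsmount *mnt, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;		// non-zero stops the walk
 *	}
 *
 *	static int count_snapshot(const struct path *path)
 *	{
 *		struct vfsmount *tree = collect_mounts(path);
 *		int n = 0;
 *
 *		if (IS_ERR(tree))
 *			return PTR_ERR(tree);
 *		iterate_mounts(count_one, &n, tree);
 *		drop_collected_mounts(tree);
 *		return n;
 *	}
 */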
2271
2272 static void lock_mnt_tree(struct mount *mnt)
2273 {
2274 struct mount *p;
2275
2276 for (p = mnt; p; p = next_mnt(p, mnt)) {
2277 int flags = p->mnt.mnt_flags;
2278 /* Don't allow unprivileged users to change mount flags */
2279 flags |= MNT_LOCK_ATIME;
2280
2281 if (flags & MNT_READONLY)
2282 flags |= MNT_LOCK_READONLY;
2283
2284 if (flags & MNT_NODEV)
2285 flags |= MNT_LOCK_NODEV;
2286
2287 if (flags & MNT_NOSUID)
2288 flags |= MNT_LOCK_NOSUID;
2289
2290 if (flags & MNT_NOEXEC)
2291 flags |= MNT_LOCK_NOEXEC;
2292 /* Don't allow unprivileged users to reveal what is under a mount */
2293 if (list_empty(&p->mnt_expire))
2294 flags |= MNT_LOCKED;
2295 p->mnt.mnt_flags = flags;
2296 }
2297 }
2298
2299 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2300 {
2301 struct mount *p;
2302
2303 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2304 if (p->mnt_group_id && !IS_MNT_SHARED(p))
2305 mnt_release_group_id(p);
2306 }
2307 }
2308
2309 static int invent_group_ids(struct mount *mnt, bool recurse)
2310 {
2311 struct mount *p;
2312
2313 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2314 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
2315 int err = mnt_alloc_group_id(p);
2316 if (err) {
2317 cleanup_group_ids(mnt, p);
2318 return err;
2319 }
2320 }
2321 }
2322
2323 return 0;
2324 }
2325
2326 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2327 {
2328 unsigned int max = READ_ONCE(sysctl_mount_max);
2329 unsigned int mounts = 0;
2330 struct mount *p;
2331
2332 if (ns->nr_mounts >= max)
2333 return -ENOSPC;
2334 max -= ns->nr_mounts;
2335 if (ns->pending_mounts >= max)
2336 return -ENOSPC;
2337 max -= ns->pending_mounts;
2338
2339 for (p = mnt; p; p = next_mnt(p, mnt))
2340 mounts++;
2341
2342 if (mounts > max)
2343 return -ENOSPC;
2344
2345 ns->pending_mounts += mounts;
2346 return 0;
2347 }
2348
2349 enum mnt_tree_flags_t {
2350 MNT_TREE_MOVE = BIT(0),
2351 MNT_TREE_BENEATH = BIT(1),
2352 };
2353
2354 /**
2355 * attach_recursive_mnt - attach a source mount tree
2356 * @source_mnt: mount tree to be attached
2357 * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath
2358 * @dest_mp: the mountpoint @source_mnt will be mounted at
2359 * @flags: modify how @source_mnt is supposed to be attached
2360 *
2361 * NOTE: the tables below explain the semantics when a source mount
2362 * of a given type is attached to a destination mount of a given type.
2363 * ---------------------------------------------------------------------------
2364 * | BIND MOUNT OPERATION |
2365 * |**************************************************************************
2366 * | source-->| shared | private | slave | unbindable |
2367 * | dest | | | | |
2368 * | | | | | | |
2369 * | v | | | | |
2370 * |**************************************************************************
2371 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
2372 * | | | | | |
2373 * |non-shared| shared (+) | private | slave (*) | invalid |
2374 * ***************************************************************************
2375 * A bind operation clones the source mount and mounts the clone on the
2376 * destination mount.
2377 *
2378 * (++) the cloned mount is propagated to all the mounts in the propagation
2379 * tree of the destination mount and the cloned mount is added to
2380 * the peer group of the source mount.
2381 * (+) the cloned mount is created under the destination mount and is marked
2382 * as shared. The cloned mount is added to the peer group of the source
2383 * mount.
2384 * (+++) the mount is propagated to all the mounts in the propagation tree
2385 * of the destination mount and the cloned mount is made slave
2386 * of the same master as that of the source mount. The cloned mount
2387 * is marked as 'shared and slave'.
2388 * (*) the cloned mount is made a slave of the same master as that of the
2389 * source mount.
2390 *
2391 * ---------------------------------------------------------------------------
2392 * | MOVE MOUNT OPERATION |
2393 * |**************************************************************************
2394 * | source-->| shared | private | slave | unbindable |
2395 * | dest | | | | |
2396 * | | | | | | |
2397 * | v | | | | |
2398 * |**************************************************************************
2399 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
2400 * | | | | | |
2401 * |non-shared| shared (+*) | private | slave (*) | unbindable |
2402 * ***************************************************************************
2403 *
2404 * (+) the mount is moved to the destination and is then propagated to
2405 * all the mounts in the propagation tree of the destination mount.
2406 * (+*) the mount is moved to the destination.
2407 * (+++) the mount is moved to the destination and is then propagated to
2408 * all the mounts belonging to the destination mount's propagation tree.
2409 * the mount is marked as 'shared and slave'.
2410 * (*) the mount continues to be a slave at the new location.
2411 *
2412 * If the source mount is a tree, the operations explained above are
2413 * applied to each mount in the tree.
2414 * Must be called without spinlocks held, since this function can sleep
2415 * in allocations.
2416 *
2417 * Context: The function expects namespace_lock() to be held.
2418 * Return: If @source_mnt was successfully attached 0 is returned.
2419 * Otherwise a negative error code is returned.
2420 */
2421 static int attach_recursive_mnt(struct mount *source_mnt,
2422 struct mount *top_mnt,
2423 struct mountpoint *dest_mp,
2424 enum mnt_tree_flags_t flags)
2425 {
2426 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2427 HLIST_HEAD(tree_list);
2428 struct mnt_namespace *ns = top_mnt->mnt_ns;
2429 struct mountpoint *smp;
2430 struct mount *child, *dest_mnt, *p;
2431 struct hlist_node *n;
2432 int err = 0;
2433 bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;
2434
2435 /*
2436 * Preallocate a mountpoint in case the new mounts need to be
2437 * mounted beneath mounts on the same mountpoint.
2438 */
2439 smp = get_mountpoint(source_mnt->mnt.mnt_root);
2440 if (IS_ERR(smp))
2441 return PTR_ERR(smp);
2442
2443 /* Is there space to add these mounts to the mount namespace? */
2444 if (!moving) {
2445 err = count_mounts(ns, source_mnt);
2446 if (err)
2447 goto out;
2448 }
2449
2450 if (beneath)
2451 dest_mnt = top_mnt->mnt_parent;
2452 else
2453 dest_mnt = top_mnt;
2454
2455 if (IS_MNT_SHARED(dest_mnt)) {
2456 err = invent_group_ids(source_mnt, true);
2457 if (err)
2458 goto out;
2459 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
2460 }
2461 lock_mount_hash();
2462 if (err)
2463 goto out_cleanup_ids;
2464
2465 if (IS_MNT_SHARED(dest_mnt)) {
2466 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2467 set_mnt_shared(p);
2468 }
2469
2470 if (moving) {
2471 if (beneath)
2472 dest_mp = smp;
2473 unhash_mnt(source_mnt);
2474 attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
2475 touch_mnt_namespace(source_mnt->mnt_ns);
2476 } else {
2477 if (source_mnt->mnt_ns) {
2478 LIST_HEAD(head);
2479
2480 /* move from anon - the caller will destroy */
2481 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2482 move_from_ns(p, &head);
2483 list_del_init(&head);
2484 }
2485 if (beneath)
2486 mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
2487 else
2488 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2489 commit_tree(source_mnt);
2490 }
2491
2492 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2493 struct mount *q;
2494 hlist_del_init(&child->mnt_hash);
2495 q = __lookup_mnt(&child->mnt_parent->mnt,
2496 child->mnt_mountpoint);
2497 if (q)
2498 mnt_change_mountpoint(child, smp, q);
2499 /* Notice when we are propagating across user namespaces */
2500 if (child->mnt_parent->mnt_ns->user_ns != user_ns)
2501 lock_mnt_tree(child);
2502 child->mnt.mnt_flags &= ~MNT_LOCKED;
2503 commit_tree(child);
2504 }
2505 put_mountpoint(smp);
2506 unlock_mount_hash();
2507
2508 return 0;
2509
2510 out_cleanup_ids:
2511 while (!hlist_empty(&tree_list)) {
2512 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2513 child->mnt_parent->mnt_ns->pending_mounts = 0;
2514 umount_tree(child, UMOUNT_SYNC);
2515 }
2516 unlock_mount_hash();
2517 cleanup_group_ids(source_mnt, NULL);
2518 out:
2519 ns->pending_mounts = 0;
2520
2521 read_seqlock_excl(&mount_lock);
2522 put_mountpoint(smp);
2523 read_sequnlock_excl(&mount_lock);
2524
2525 return err;
2526 }
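/*
 * Userspace sketch (illustrative; paths are made up and each target is
 * assumed to already be a mount) of the bind rows in the tables above:
 * a bind into a shared destination propagates the clone to the
 * destination's peers, a bind into a private destination does not.
 *
 *	mount(NULL, "/mnt/shared", NULL, MS_SHARED, NULL);
 *	// dest shared, source private -> clone becomes shared (+)
 *	mount("/src", "/mnt/shared/a", NULL, MS_BIND, NULL);
 *
 *	mount(NULL, "/mnt/private", NULL, MS_PRIVATE, NULL);
 *	// dest private, source private -> clone stays private
 *	mount("/src", "/mnt/private/a", NULL, MS_BIND, NULL);
 */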
2527
2528 /**
2529 * do_lock_mount - lock mount and mountpoint
2530 * @path: target path
2531 * @beneath: whether the intention is to mount beneath @path
2532 *
2533 * Follow the mount stack on @path until the top mount @mnt is found. If
2534 * the initial @path->{mnt,dentry} is a mountpoint, look up the first
2535 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
2536 * until nothing is stacked on top of it anymore.
2537 *
2538 * Acquire the inode_lock() on the top mount's ->mnt_root to protect
2539 * against concurrent removal of the new mountpoint from another mount
2540 * namespace.
2541 *
2542 * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2543 * @mp on @mnt->mnt_parent must be acquired. This protects against a
2544 * concurrent unlink of @mp->m_dentry from another mount namespace
2545 * where @mnt doesn't have a child mount mounted on @mp. A concurrent
2546 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
2547 * on top of it for @beneath.
2548 *
2549 * In addition, @beneath needs to make sure that @mnt hasn't been
2550 * unmounted or moved from its current mountpoint in between dropping
2551 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
2552 * being unmounted would be detected later by e.g., calling
2553 * check_mnt(mnt) in the function it's called from. For the @beneath
2554 * case however, it's useful to detect it directly in do_lock_mount().
2555 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
2556 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
2557 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
2558 *
2559 * Return: Either the target mountpoint on the top mount or the top
2560 * mount's mountpoint.
2561 */
2562 static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
2563 {
2564 struct vfsmount *mnt = path->mnt;
2565 struct dentry *dentry;
2566 struct mountpoint *mp = ERR_PTR(-ENOENT);
2567
2568 for (;;) {
2569 struct mount *m;
2570
2571 if (beneath) {
2572 m = real_mount(mnt);
2573 read_seqlock_excl(&mount_lock);
2574 dentry = dget(m->mnt_mountpoint);
2575 read_sequnlock_excl(&mount_lock);
2576 } else {
2577 dentry = path->dentry;
2578 }
2579
2580 inode_lock(dentry->d_inode);
2581 if (unlikely(cant_mount(dentry))) {
2582 inode_unlock(dentry->d_inode);
2583 goto out;
2584 }
2585
2586 namespace_lock();
2587
2588 if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
2589 namespace_unlock();
2590 inode_unlock(dentry->d_inode);
2591 goto out;
2592 }
2593
2594 mnt = lookup_mnt(path);
2595 if (likely(!mnt))
2596 break;
2597
2598 namespace_unlock();
2599 inode_unlock(dentry->d_inode);
2600 if (beneath)
2601 dput(dentry);
2602 path_put(path);
2603 path->mnt = mnt;
2604 path->dentry = dget(mnt->mnt_root);
2605 }
2606
2607 mp = get_mountpoint(dentry);
2608 if (IS_ERR(mp)) {
2609 namespace_unlock();
2610 inode_unlock(dentry->d_inode);
2611 }
2612
2613 out:
2614 if (beneath)
2615 dput(dentry);
2616
2617 return mp;
2618 }
2619
2620 static inline struct mountpoint *lock_mount(struct path *path)
2621 {
2622 return do_lock_mount(path, false);
2623 }
2624
2625 static void unlock_mount(struct mountpoint *where)
2626 {
2627 struct dentry *dentry = where->m_dentry;
2628
2629 read_seqlock_excl(&mount_lock);
2630 put_mountpoint(where);
2631 read_sequnlock_excl(&mount_lock);
2632
2633 namespace_unlock();
2634 inode_unlock(dentry->d_inode);
2635 }
2636
2637 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2638 {
2639 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2640 return -EINVAL;
2641
2642 if (d_is_dir(mp->m_dentry) !=
2643 d_is_dir(mnt->mnt.mnt_root))
2644 return -ENOTDIR;
2645
2646 return attach_recursive_mnt(mnt, p, mp, 0);
2647 }
2648
2649 /*
2650 * Sanity check the flags to change_mnt_propagation.
2651 */
2652
2653 static int flags_to_propagation_type(int ms_flags)
2654 {
2655 int type = ms_flags & ~(MS_REC | MS_SILENT);
2656
2657 /* Fail if any non-propagation flags are set */
2658 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2659 return 0;
2660 /* Only one propagation flag should be set */
2661 if (!is_power_of_2(type))
2662 return 0;
2663 return type;
2664 }
2665
2666 /*
2667 * recursively change the type of the mountpoint.
2668 */
2669 static int do_change_type(struct path *path, int ms_flags)
2670 {
2671 struct mount *m;
2672 struct mount *mnt = real_mount(path->mnt);
2673 int recurse = ms_flags & MS_REC;
2674 int type;
2675 int err = 0;
2676
2677 if (!path_mounted(path))
2678 return -EINVAL;
2679
2680 type = flags_to_propagation_type(ms_flags);
2681 if (!type)
2682 return -EINVAL;
2683
2684 namespace_lock();
2685 if (type == MS_SHARED) {
2686 err = invent_group_ids(mnt, recurse);
2687 if (err)
2688 goto out_unlock;
2689 }
2690
2691 lock_mount_hash();
2692 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2693 change_mnt_propagation(m, type);
2694 unlock_mount_hash();
2695
2696 out_unlock:
2697 namespace_unlock();
2698 return err;
2699 }
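/*
 * Userspace sketch (illustrative): propagation changes reach
 * do_change_type() as mount(2) calls carrying exactly one propagation
 * flag, optionally combined with MS_REC:
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);  // recursive
 *	mount(NULL, "/mnt", NULL, MS_PRIVATE, NULL);          // one mount
 *	// two propagation bits fail is_power_of_2() -> -EINVAL:
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_SLAVE, NULL);
 */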
2700
2701 static struct mount *__do_loopback(struct path *old_path, int recurse)
2702 {
2703 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2704
2705 if (IS_MNT_UNBINDABLE(old))
2706 return mnt;
2707
2708 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2709 return mnt;
2710
2711 if (!recurse && has_locked_children(old, old_path->dentry))
2712 return mnt;
2713
2714 if (recurse)
2715 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2716 else
2717 mnt = clone_mnt(old, old_path->dentry, 0);
2718
2719 if (!IS_ERR(mnt))
2720 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2721
2722 return mnt;
2723 }
2724
2725 /*
2726 * do loopback mount.
2727 */
2728 static int do_loopback(struct path *path, const char *old_name,
2729 int recurse)
2730 {
2731 struct path old_path;
2732 struct mount *mnt = NULL, *parent;
2733 struct mountpoint *mp;
2734 int err;
2735 if (!old_name || !*old_name)
2736 return -EINVAL;
2737 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2738 if (err)
2739 return err;
2740
2741 err = -EINVAL;
2742 if (mnt_ns_loop(old_path.dentry))
2743 goto out;
2744
2745 mp = lock_mount(path);
2746 if (IS_ERR(mp)) {
2747 err = PTR_ERR(mp);
2748 goto out;
2749 }
2750
2751 parent = real_mount(path->mnt);
2752 if (!check_mnt(parent))
2753 goto out2;
2754
2755 mnt = __do_loopback(&old_path, recurse);
2756 if (IS_ERR(mnt)) {
2757 err = PTR_ERR(mnt);
2758 goto out2;
2759 }
2760
2761 err = graft_tree(mnt, parent, mp);
2762 if (err) {
2763 lock_mount_hash();
2764 umount_tree(mnt, UMOUNT_SYNC);
2765 unlock_mount_hash();
2766 }
2767 out2:
2768 unlock_mount(mp);
2769 out:
2770 path_put(&old_path);
2771 return err;
2772 }
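/*
 * Userspace sketch (illustrative paths) of the requests that end up in
 * do_loopback() above; MS_REC selects copy_tree() over a single
 * clone_mnt():
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);           // one mount
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);  // whole tree
 */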
2773
2774 static struct file *open_detached_copy(struct path *path, bool recursive)
2775 {
2776 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2777 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2778 struct mount *mnt, *p;
2779 struct file *file;
2780
2781 if (IS_ERR(ns))
2782 return ERR_CAST(ns);
2783
2784 namespace_lock();
2785 mnt = __do_loopback(path, recursive);
2786 if (IS_ERR(mnt)) {
2787 namespace_unlock();
2788 free_mnt_ns(ns);
2789 return ERR_CAST(mnt);
2790 }
2791
2792 lock_mount_hash();
2793 for (p = mnt; p; p = next_mnt(p, mnt)) {
2794 mnt_add_to_ns(ns, p);
2795 ns->nr_mounts++;
2796 }
2797 ns->root = mnt;
2798 mntget(&mnt->mnt);
2799 unlock_mount_hash();
2800 namespace_unlock();
2801
2802 mntput(path->mnt);
2803 path->mnt = &mnt->mnt;
2804 file = dentry_open(path, O_PATH, current_cred());
2805 if (IS_ERR(file))
2806 dissolve_on_fput(path->mnt);
2807 else
2808 file->f_mode |= FMODE_NEED_UNMOUNT;
2809 return file;
2810 }
2811
2812 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2813 {
2814 struct file *file;
2815 struct path path;
2816 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2817 bool detached = flags & OPEN_TREE_CLONE;
2818 int error;
2819 int fd;
2820
2821 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2822
2823 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2824 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2825 OPEN_TREE_CLOEXEC))
2826 return -EINVAL;
2827
2828 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2829 return -EINVAL;
2830
2831 if (flags & AT_NO_AUTOMOUNT)
2832 lookup_flags &= ~LOOKUP_AUTOMOUNT;
2833 if (flags & AT_SYMLINK_NOFOLLOW)
2834 lookup_flags &= ~LOOKUP_FOLLOW;
2835 if (flags & AT_EMPTY_PATH)
2836 lookup_flags |= LOOKUP_EMPTY;
2837
2838 if (detached && !may_mount())
2839 return -EPERM;
2840
2841 fd = get_unused_fd_flags(flags & O_CLOEXEC);
2842 if (fd < 0)
2843 return fd;
2844
2845 error = user_path_at(dfd, filename, lookup_flags, &path);
2846 if (unlikely(error)) {
2847 file = ERR_PTR(error);
2848 } else {
2849 if (detached)
2850 file = open_detached_copy(&path, flags & AT_RECURSIVE);
2851 else
2852 file = dentry_open(&path, O_PATH, current_cred());
2853 path_put(&path);
2854 }
2855 if (IS_ERR(file)) {
2856 put_unused_fd(fd);
2857 return PTR_ERR(file);
2858 }
2859 fd_install(fd, file);
2860 return fd;
2861 }
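/*
 * Userspace sketch (illustrative; assumes a libc exposing the
 * open_tree(2) syscall number and the uapi mount flags): with
 * OPEN_TREE_CLONE the returned O_PATH fd refers to a detached copy
 * created by open_detached_copy() above.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt",
 *			 OPEN_TREE_CLONE | AT_RECURSIVE |
 *			 OPEN_TREE_CLOEXEC);
 */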
2862
2863 /*
2864 * Don't allow locked mount flags to be cleared.
2865 *
2866 * No locks need to be held here while testing the various MNT_LOCK
2867 * flags because those flags can never be cleared once they are set.
2868 */
2869 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2870 {
2871 unsigned int fl = mnt->mnt.mnt_flags;
2872
2873 if ((fl & MNT_LOCK_READONLY) &&
2874 !(mnt_flags & MNT_READONLY))
2875 return false;
2876
2877 if ((fl & MNT_LOCK_NODEV) &&
2878 !(mnt_flags & MNT_NODEV))
2879 return false;
2880
2881 if ((fl & MNT_LOCK_NOSUID) &&
2882 !(mnt_flags & MNT_NOSUID))
2883 return false;
2884
2885 if ((fl & MNT_LOCK_NOEXEC) &&
2886 !(mnt_flags & MNT_NOEXEC))
2887 return false;
2888
2889 if ((fl & MNT_LOCK_ATIME) &&
2890 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2891 return false;
2892
2893 return true;
2894 }
2895
2896 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2897 {
2898 bool readonly_request = (mnt_flags & MNT_READONLY);
2899
2900 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2901 return 0;
2902
2903 if (readonly_request)
2904 return mnt_make_readonly(mnt);
2905
2906 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2907 return 0;
2908 }
2909
2910 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2911 {
2912 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2913 mnt->mnt.mnt_flags = mnt_flags;
2914 touch_mnt_namespace(mnt->mnt_ns);
2915 }
2916
2917 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2918 {
2919 struct super_block *sb = mnt->mnt_sb;
2920
2921 if (!__mnt_is_readonly(mnt) &&
2922 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
2923 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2924 char *buf = (char *)__get_free_page(GFP_KERNEL);
2925 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2926
2927 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
2928 sb->s_type->name,
2929 is_mounted(mnt) ? "remounted" : "mounted",
2930 mntpath, &sb->s_time_max,
2931 (unsigned long long)sb->s_time_max);
2932
2933 free_page((unsigned long)buf);
2934 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
2935 }
2936 }
2937
2938 /*
2939 * Handle reconfiguration of the mountpoint only without alteration of the
2940 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
2941 * to mount(2).
2942 */
2943 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2944 {
2945 struct super_block *sb = path->mnt->mnt_sb;
2946 struct mount *mnt = real_mount(path->mnt);
2947 int ret;
2948
2949 if (!check_mnt(mnt))
2950 return -EINVAL;
2951
2952 if (!path_mounted(path))
2953 return -EINVAL;
2954
2955 if (!can_change_locked_flags(mnt, mnt_flags))
2956 return -EPERM;
2957
2958 /*
2959 * We're only checking whether the superblock is read-only not
2960 * changing it, so only take down_read(&sb->s_umount).
2961 */
2962 down_read(&sb->s_umount);
2963 lock_mount_hash();
2964 ret = change_mount_ro_state(mnt, mnt_flags);
2965 if (ret == 0)
2966 set_mount_attributes(mnt, mnt_flags);
2967 unlock_mount_hash();
2968 up_read(&sb->s_umount);
2969
2970 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2971
2972 return ret;
2973 }
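/*
 * Userspace sketch (illustrative): MS_REMOUNT|MS_BIND only touches the
 * per-mountpoint flags, which is the classic way to make an existing
 * bind mount read-only without reconfiguring the superblock:
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */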
2974
2975 /*
2976 * change filesystem flags. dir should be a physical root of filesystem.
2977 * If you've mounted a non-root directory somewhere and want to do remount
2978 * on it - tough luck.
2979 */
2980 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2981 int mnt_flags, void *data)
2982 {
2983 int err;
2984 struct super_block *sb = path->mnt->mnt_sb;
2985 struct mount *mnt = real_mount(path->mnt);
2986 struct fs_context *fc;
2987
2988 if (!check_mnt(mnt))
2989 return -EINVAL;
2990
2991 if (!path_mounted(path))
2992 return -EINVAL;
2993
2994 if (!can_change_locked_flags(mnt, mnt_flags))
2995 return -EPERM;
2996
2997 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2998 if (IS_ERR(fc))
2999 return PTR_ERR(fc);
3000
3001 /*
3002 * Indicate to the filesystem that the remount request is coming
3003 * from the legacy mount system call.
3004 */
3005 fc->oldapi = true;
3006
3007 err = parse_monolithic_mount_data(fc, data);
3008 if (!err) {
3009 down_write(&sb->s_umount);
3010 err = -EPERM;
3011 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
3012 err = reconfigure_super(fc);
3013 if (!err) {
3014 lock_mount_hash();
3015 set_mount_attributes(mnt, mnt_flags);
3016 unlock_mount_hash();
3017 }
3018 }
3019 up_write(&sb->s_umount);
3020 }
3021
3022 mnt_warn_timestamp_expiry(path, &mnt->mnt);
3023
3024 put_fs_context(fc);
3025 return err;
3026 }
3027
3028 static inline int tree_contains_unbindable(struct mount *mnt)
3029 {
3030 struct mount *p;
3031 for (p = mnt; p; p = next_mnt(p, mnt)) {
3032 if (IS_MNT_UNBINDABLE(p))
3033 return 1;
3034 }
3035 return 0;
3036 }
3037
3038 /*
3039 * Check that there aren't references to earlier/same mount namespaces in the
3040 * specified subtree. Such references can act as pins for mount namespaces
3041 * that aren't checked by the mount-cycle checking code, thereby allowing
3042 * cycles to be made.
3043 */
3044 static bool check_for_nsfs_mounts(struct mount *subtree)
3045 {
3046 struct mount *p;
3047 bool ret = false;
3048
3049 lock_mount_hash();
3050 for (p = subtree; p; p = next_mnt(p, subtree))
3051 if (mnt_ns_loop(p->mnt.mnt_root))
3052 goto out;
3053
3054 ret = true;
3055 out:
3056 unlock_mount_hash();
3057 return ret;
3058 }
3059
3060 static int do_set_group(struct path *from_path, struct path *to_path)
3061 {
3062 struct mount *from, *to;
3063 int err;
3064
3065 from = real_mount(from_path->mnt);
3066 to = real_mount(to_path->mnt);
3067
3068 namespace_lock();
3069
3070 err = -EINVAL;
3071 /* To and From must be mounted */
3072 if (!is_mounted(&from->mnt))
3073 goto out;
3074 if (!is_mounted(&to->mnt))
3075 goto out;
3076
3077 err = -EPERM;
3078 /* We should be allowed to modify mount namespaces of both mounts */
3079 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
3080 goto out;
3081 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
3082 goto out;
3083
3084 err = -EINVAL;
3085 /* To and From paths should be mount roots */
3086 if (!path_mounted(from_path))
3087 goto out;
3088 if (!path_mounted(to_path))
3089 goto out;
3090
3091 /* Setting sharing groups is only allowed across same superblock */
3092 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
3093 goto out;
3094
3095 /* From mount root should be wider than To mount root */
3096 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
3097 goto out;
3098
3099 /* From mount should not have locked children in place of To's root */
3100 if (has_locked_children(from, to->mnt.mnt_root))
3101 goto out;
3102
3103 /* Setting sharing groups is only allowed on private mounts */
3104 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
3105 goto out;
3106
3107 /* From should not be private */
3108 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
3109 goto out;
3110
3111 if (IS_MNT_SLAVE(from)) {
3112 struct mount *m = from->mnt_master;
3113
3114 list_add(&to->mnt_slave, &m->mnt_slave_list);
3115 to->mnt_master = m;
3116 }
3117
3118 if (IS_MNT_SHARED(from)) {
3119 to->mnt_group_id = from->mnt_group_id;
3120 list_add(&to->mnt_share, &from->mnt_share);
3121 lock_mount_hash();
3122 set_mnt_shared(to);
3123 unlock_mount_hash();
3124 }
3125
3126 err = 0;
3127 out:
3128 namespace_unlock();
3129 return err;
3130 }
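/*
 * Userspace sketch (illustrative; assumes the MOVE_MOUNT_SET_GROUP
 * flag of move_mount(2), which is the caller of do_set_group()): both
 * fds refer to mount roots on the same superblock and @to inherits
 * @from's sharing group.
 *
 *	int from_fd = open("/mnt/a", O_PATH);
 *	int to_fd = open("/mnt/b", O_PATH);
 *
 *	syscall(SYS_move_mount, from_fd, "", to_fd, "",
 *		MOVE_MOUNT_SET_GROUP | MOVE_MOUNT_F_EMPTY_PATH |
 *		MOVE_MOUNT_T_EMPTY_PATH);
 */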
3131
3132 /**
3133 * path_overmounted - check if path is overmounted
3134 * @path: path to check
3135 *
3136 * Check if path is overmounted, i.e., if there's a mount on top of
3137 * @path->mnt with @path->dentry as mountpoint.
3138 *
3139 * Context: This function expects namespace_lock() to be held.
3140 * Return: If path is overmounted true is returned, false if not.
3141 */
3142 static inline bool path_overmounted(const struct path *path)
3143 {
3144 rcu_read_lock();
3145 if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
3146 rcu_read_unlock();
3147 return true;
3148 }
3149 rcu_read_unlock();
3150 return false;
3151 }
3152
3153 /**
3154 * can_move_mount_beneath - check that we can mount beneath the top mount
3155 * @from: mount to mount beneath
3156 * @to: mount under which to mount
3157 * @mp: mountpoint of @to
3158 *
3159 * - Make sure that @to->dentry is actually the root of a mount under
3160 * which we can mount another mount.
3161 * - Make sure that nothing can be mounted beneath the caller's current
3162 * root or the rootfs of the namespace.
3163 * - Make sure that the caller can unmount the topmost mount ensuring
3164 * that the caller could reveal the underlying mountpoint.
3165 * - Ensure that nothing has been mounted on top of @from before we
3166 * grabbed @namespace_sem to avoid creating pointless shadow mounts.
3167 * - Prevent mounting beneath a mount if the propagation relationship
3168 * between the source mount, parent mount, and top mount would lead to
3169 * nonsensical mount trees.
3170 *
3171 * Context: This function expects namespace_lock() to be held.
3172 * Return: On success 0, and on error a negative error code is returned.
3173 */
3174 static int can_move_mount_beneath(const struct path *from,
3175 const struct path *to,
3176 const struct mountpoint *mp)
3177 {
3178 struct mount *mnt_from = real_mount(from->mnt),
3179 *mnt_to = real_mount(to->mnt),
3180 *parent_mnt_to = mnt_to->mnt_parent;
3181
3182 if (!mnt_has_parent(mnt_to))
3183 return -EINVAL;
3184
3185 if (!path_mounted(to))
3186 return -EINVAL;
3187
3188 if (IS_MNT_LOCKED(mnt_to))
3189 return -EINVAL;
3190
3191 /* Avoid creating shadow mounts during mount propagation. */
3192 if (path_overmounted(from))
3193 return -EINVAL;
3194
3195 /*
3196 * Mounting beneath the rootfs only makes sense when the
3197 * semantics of pivot_root(".", ".") are used.
3198 */
3199 if (&mnt_to->mnt == current->fs->root.mnt)
3200 return -EINVAL;
3201 if (parent_mnt_to == current->nsproxy->mnt_ns->root)
3202 return -EINVAL;
3203
3204 for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
3205 if (p == mnt_to)
3206 return -EINVAL;
3207
3208 /*
3209 * If the parent mount propagates to the child mount this would
3210 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
3211 * propagating a copy @c of @mnt_from on top of @mnt_to. This
3212 * defeats the whole purpose of mounting beneath another mount.
3213 */
3214 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
3215 return -EINVAL;
3216
3217 /*
3218 * If @mnt_to->mnt_parent propagates to @mnt_from this would
3219 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
3220 * Afterwards @mnt_from would be mounted on top of
3221 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
3222 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
3223 * already mounted on @mnt_from, @mnt_to would ultimately be
3224 * remounted on top of @c. Afterwards, @mnt_from would be
3225 * covered by a copy @c of @mnt_from and @c would be covered by
3226 * @mnt_from itself. This defeats the whole purpose of mounting
3227 * @mnt_from beneath @mnt_to.
3228 */
3229 if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
3230 return -EINVAL;
3231
3232 return 0;
3233 }
3234
3235 static int do_move_mount(struct path *old_path, struct path *new_path,
3236 bool beneath)
3237 {
3238 struct mnt_namespace *ns;
3239 struct mount *p;
3240 struct mount *old;
3241 struct mount *parent;
3242 struct mountpoint *mp, *old_mp;
3243 int err;
3244 bool attached;
3245 enum mnt_tree_flags_t flags = 0;
3246
3247 mp = do_lock_mount(new_path, beneath);
3248 if (IS_ERR(mp))
3249 return PTR_ERR(mp);
3250
3251 old = real_mount(old_path->mnt);
3252 p = real_mount(new_path->mnt);
3253 parent = old->mnt_parent;
3254 attached = mnt_has_parent(old);
3255 if (attached)
3256 flags |= MNT_TREE_MOVE;
3257 old_mp = old->mnt_mp;
3258 ns = old->mnt_ns;
3259
3260 err = -EINVAL;
3261 /* The mountpoint must be in our namespace. */
3262 if (!check_mnt(p))
3263 goto out;
3264
3265 /* The thing moved must be mounted... */
3266 if (!is_mounted(&old->mnt))
3267 goto out;
3268
3269 /* ... and either ours or the root of anon namespace */
3270 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
3271 goto out;
3272
3273 if (old->mnt.mnt_flags & MNT_LOCKED)
3274 goto out;
3275
3276 if (!path_mounted(old_path))
3277 goto out;
3278
3279 if (d_is_dir(new_path->dentry) !=
3280 d_is_dir(old_path->dentry))
3281 goto out;
3282 /*
3283 * Don't move a mount residing in a shared parent.
3284 */
3285 if (attached && IS_MNT_SHARED(parent))
3286 goto out;
3287
3288 if (beneath) {
3289 err = can_move_mount_beneath(old_path, new_path, mp);
3290 if (err)
3291 goto out;
3292
3293 err = -EINVAL;
3294 p = p->mnt_parent;
3295 flags |= MNT_TREE_BENEATH;
3296 }
3297
3298 /*
3299 * Don't move a mount tree containing unbindable mounts to a destination
3300 * mount which is shared.
3301 */
3302 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
3303 goto out;
3304 err = -ELOOP;
3305 if (!check_for_nsfs_mounts(old))
3306 goto out;
3307 for (; mnt_has_parent(p); p = p->mnt_parent)
3308 if (p == old)
3309 goto out;
3310
3311 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
3312 if (err)
3313 goto out;
3314
3315 /* if the mount is moved, it should no longer be expired
3316 * automatically */
3317 list_del_init(&old->mnt_expire);
3318 if (attached)
3319 put_mountpoint(old_mp);
3320 out:
3321 unlock_mount(mp);
3322 if (!err) {
3323 if (attached)
3324 mntput_no_expire(parent);
3325 else
3326 free_mnt_ns(ns);
3327 }
3328 return err;
3329 }
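/*
 * Userspace sketch (illustrative; MOVE_MOUNT_BENEATH is assumed to be
 * available, it is what sets @beneath above): move an attached mount,
 * or mount a detached open_tree(2) copy beneath the top mount at the
 * destination.
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/old", AT_FDCWD, "/new", 0);
 *	syscall(SYS_move_mount, tree_fd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_BENEATH);
 */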
3330
3331 static int do_move_mount_old(struct path *path, const char *old_name)
3332 {
3333 struct path old_path;
3334 int err;
3335
3336 if (!old_name || !*old_name)
3337 return -EINVAL;
3338
3339 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3340 if (err)
3341 return err;
3342
3343 err = do_move_mount(&old_path, path, false);
3344 path_put(&old_path);
3345 return err;
3346 }
3347
3348 /*
3349 * add a mount into a namespace's mount tree
3350 */
3351 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
3352 const struct path *path, int mnt_flags)
3353 {
3354 struct mount *parent = real_mount(path->mnt);
3355
3356 mnt_flags &= ~MNT_INTERNAL_FLAGS;
3357
3358 if (unlikely(!check_mnt(parent))) {
3359 /* that's acceptable only for automounts done in private ns */
3360 if (!(mnt_flags & MNT_SHRINKABLE))
3361 return -EINVAL;
3362 /* ... and for those we'd better have mountpoint still alive */
3363 if (!parent->mnt_ns)
3364 return -EINVAL;
3365 }
3366
3367 /* Refuse the same filesystem on the same mount point */
3368 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
3369 return -EBUSY;
3370
3371 if (d_is_symlink(newmnt->mnt.mnt_root))
3372 return -EINVAL;
3373
3374 newmnt->mnt.mnt_flags = mnt_flags;
3375 return graft_tree(newmnt, parent, mp);
3376 }
3377
3378 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3379
3380 /*
3381 * Create a new mount using a superblock configuration and request it
3382 * be added to the namespace tree.
3383 */
3384 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
3385 unsigned int mnt_flags)
3386 {
3387 struct vfsmount *mnt;
3388 struct mountpoint *mp;
3389 struct super_block *sb = fc->root->d_sb;
3390 int error;
3391
3392 error = security_sb_kern_mount(sb);
3393 if (!error && mount_too_revealing(sb, &mnt_flags))
3394 error = -EPERM;
3395
3396 if (unlikely(error)) {
3397 fc_drop_locked(fc);
3398 return error;
3399 }
3400
3401 up_write(&sb->s_umount);
3402
3403 mnt = vfs_create_mount(fc);
3404 if (IS_ERR(mnt))
3405 return PTR_ERR(mnt);
3406
3407 mnt_warn_timestamp_expiry(mountpoint, mnt);
3408
3409 mp = lock_mount(mountpoint);
3410 if (IS_ERR(mp)) {
3411 mntput(mnt);
3412 return PTR_ERR(mp);
3413 }
3414 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3415 unlock_mount(mp);
3416 if (error < 0)
3417 mntput(mnt);
3418 return error;
3419 }
3420
3421 /*
3422 * create a new mount for userspace and request it to be added into the
3423 * namespace's tree
3424 */
3425 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
3426 int mnt_flags, const char *name, void *data)
3427 {
3428 struct file_system_type *type;
3429 struct fs_context *fc;
3430 const char *subtype = NULL;
3431 int err = 0;
3432
3433 if (!fstype)
3434 return -EINVAL;
3435
3436 type = get_fs_type(fstype);
3437 if (!type)
3438 return -ENODEV;
3439
3440 if (type->fs_flags & FS_HAS_SUBTYPE) {
3441 subtype = strchr(fstype, '.');
3442 if (subtype) {
3443 subtype++;
3444 if (!*subtype) {
3445 put_filesystem(type);
3446 return -EINVAL;
3447 }
3448 }
3449 }
3450
3451 fc = fs_context_for_mount(type, sb_flags);
3452 put_filesystem(type);
3453 if (IS_ERR(fc))
3454 return PTR_ERR(fc);
3455
3456 /*
3457 * Indicate to the filesystem that the mount request is coming
3458 * from the legacy mount system call.
3459 */
3460 fc->oldapi = true;
3461
3462 if (subtype)
3463 err = vfs_parse_fs_string(fc, "subtype",
3464 subtype, strlen(subtype));
3465 if (!err && name)
3466 err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3467 if (!err)
3468 err = parse_monolithic_mount_data(fc, data);
3469 if (!err && !mount_capable(fc))
3470 err = -EPERM;
3471 if (!err)
3472 err = vfs_get_tree(fc);
3473 if (!err)
3474 err = do_new_mount_fc(fc, path, mnt_flags);
3475
3476 put_fs_context(fc);
3477 return err;
3478 }
3479
3480 int finish_automount(struct vfsmount *m, const struct path *path)
3481 {
3482 struct dentry *dentry = path->dentry;
3483 struct mountpoint *mp;
3484 struct mount *mnt;
3485 int err;
3486
3487 if (!m)
3488 return 0;
3489 if (IS_ERR(m))
3490 return PTR_ERR(m);
3491
3492 mnt = real_mount(m);
3493 /* The new mount record should have at least 2 refs to prevent it being
3494 * expired before we get a chance to add it
3495 */
3496 BUG_ON(mnt_get_count(mnt) < 2);
3497
3498 if (m->mnt_sb == path->mnt->mnt_sb &&
3499 m->mnt_root == dentry) {
3500 err = -ELOOP;
3501 goto discard;
3502 }
3503
3504 /*
3505 * we don't want to use lock_mount() - in this case finding something
3506 * that overmounts our mountpoint means "quietly drop what we've
3507 * got", not "try to mount it on top".
3508 */
3509 inode_lock(dentry->d_inode);
3510 namespace_lock();
3511 if (unlikely(cant_mount(dentry))) {
3512 err = -ENOENT;
3513 goto discard_locked;
3514 }
3515 if (path_overmounted(path)) {
3516 err = 0;
3517 goto discard_locked;
3518 }
3519 mp = get_mountpoint(dentry);
3520 if (IS_ERR(mp)) {
3521 err = PTR_ERR(mp);
3522 goto discard_locked;
3523 }
3524
3525 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3526 unlock_mount(mp);
3527 if (unlikely(err))
3528 goto discard;
3529 mntput(m);
3530 return 0;
3531
3532 discard_locked:
3533 namespace_unlock();
3534 inode_unlock(dentry->d_inode);
3535 discard:
3536 /* remove m from any expiration list it may be on */
3537 if (!list_empty(&mnt->mnt_expire)) {
3538 namespace_lock();
3539 list_del_init(&mnt->mnt_expire);
3540 namespace_unlock();
3541 }
3542 mntput(m);
3543 mntput(m);
3544 return err;
3545 }
3546
3547 /**
3548 * mnt_set_expiry - Put a mount on an expiration list
3549 * @mnt: The mount to list.
3550 * @expiry_list: The list to add the mount to.
3551 */
3552 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3553 {
3554 namespace_lock();
3555
3556 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3557
3558 namespace_unlock();
3559 }
3560 EXPORT_SYMBOL(mnt_set_expiry);
3561
3562 /*
3563 * process a list of expirable mountpoints with the intent of discarding any
3564 * mountpoints that aren't in use and haven't been touched since last we came
3565 * here
3566 */
3567 void mark_mounts_for_expiry(struct list_head *mounts)
3568 {
3569 struct mount *mnt, *next;
3570 LIST_HEAD(graveyard);
3571
3572 if (list_empty(mounts))
3573 return;
3574
3575 namespace_lock();
3576 lock_mount_hash();
3577
3578 /* extract from the expiration list every vfsmount that matches the
3579 * following criteria:
3580 * - only referenced by its parent vfsmount
3581 * - still marked for expiry (marked on the last call here; marks are
3582 * cleared by mntput())
3583 */
3584 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3585 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3586 propagate_mount_busy(mnt, 1))
3587 continue;
3588 list_move(&mnt->mnt_expire, &graveyard);
3589 }
3590 while (!list_empty(&graveyard)) {
3591 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3592 touch_mnt_namespace(mnt->mnt_ns);
3593 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3594 }
3595 unlock_mount_hash();
3596 namespace_unlock();
3597 }
3598
3599 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
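/*
 * In-kernel usage sketch (illustrative; the list, work item and
 * interval are made up, mirroring how automount-heavy filesystems
 * drive expiry): mounts are queued with mnt_set_expiry() and a
 * periodic worker sweeps them, so a mount only goes away if it stayed
 * unused across two consecutive sweeps.
 *
 *	static LIST_HEAD(my_expiry_list);
 *	static void my_expiry_work_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_expiry_work, my_expiry_work_fn);
 *
 *	static void my_expiry_work_fn(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&my_expiry_list);
 *		schedule_delayed_work(&my_expiry_work, 10 * HZ);
 *	}
 */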
3600
3601 /*
3602 * Ripoff of 'select_parent()'
3603 *
3604 * search the list of submounts for a given mountpoint, and move any
3605 * shrinkable submounts to the 'graveyard' list.
3606 */
3607 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3608 {
3609 struct mount *this_parent = parent;
3610 struct list_head *next;
3611 int found = 0;
3612
3613 repeat:
3614 next = this_parent->mnt_mounts.next;
3615 resume:
3616 while (next != &this_parent->mnt_mounts) {
3617 struct list_head *tmp = next;
3618 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3619
3620 next = tmp->next;
3621 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3622 continue;
3623 /*
3624 * Descend a level if the mnt_mounts list is non-empty.
3625 */
3626 if (!list_empty(&mnt->mnt_mounts)) {
3627 this_parent = mnt;
3628 goto repeat;
3629 }
3630
3631 if (!propagate_mount_busy(mnt, 1)) {
3632 list_move_tail(&mnt->mnt_expire, graveyard);
3633 found++;
3634 }
3635 }
3636 /*
3637 * All done at this level ... ascend and resume the search
3638 */
3639 if (this_parent != parent) {
3640 next = this_parent->mnt_child.next;
3641 this_parent = this_parent->mnt_parent;
3642 goto resume;
3643 }
3644 return found;
3645 }
3646
3647 /*
3648 * process a list of expirable mountpoints with the intent of discarding any
3649 * submounts of a specific parent mountpoint
3650 *
3651 * mount_lock must be held for write
3652 */
3653 static void shrink_submounts(struct mount *mnt)
3654 {
3655 LIST_HEAD(graveyard);
3656 struct mount *m;
3657
3658 /* extract submounts of 'mountpoint' from the expiration list */
3659 while (select_submounts(mnt, &graveyard)) {
3660 while (!list_empty(&graveyard)) {
3661 m = list_first_entry(&graveyard, struct mount,
3662 mnt_expire);
3663 touch_mnt_namespace(m->mnt_ns);
3664 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3665 }
3666 }
3667 }
3668
3669 static void *copy_mount_options(const void __user * data)
3670 {
3671 char *copy;
3672 unsigned left, offset;
3673
3674 if (!data)
3675 return NULL;
3676
3677 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3678 if (!copy)
3679 return ERR_PTR(-ENOMEM);
3680
3681 left = copy_from_user(copy, data, PAGE_SIZE);
3682
3683 /*
3684 * Not all architectures have an exact copy_from_user(). Resort to
3685 * copying a byte at a time.
3686 */
3687 offset = PAGE_SIZE - left;
3688 while (left) {
3689 char c;
3690 if (get_user(c, (const char __user *)data + offset))
3691 break;
3692 copy[offset] = c;
3693 left--;
3694 offset++;
3695 }
3696
3697 if (left == PAGE_SIZE) {
3698 kfree(copy);
3699 return ERR_PTR(-EFAULT);
3700 }
3701
3702 return copy;
3703 }
3704
3705 static char *copy_mount_string(const void __user *data)
3706 {
3707 return data ? strndup_user(data, PATH_MAX) : NULL;
3708 }
3709
3710 /*
3711 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3712 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3713 *
3714 * data is a (void *) that can point to any structure up to
3715 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3716 * information (or be NULL).
3717 *
3718 * Pre-0.97 versions of mount() didn't have a flags word.
3719 * When the flags word was introduced its top half was required
3720 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3721 * Therefore, if this magic number is present, it carries no information
3722 * and must be discarded.
3723 */
3724 int path_mount(const char *dev_name, struct path *path,
3725 const char *type_page, unsigned long flags, void *data_page)
3726 {
3727 unsigned int mnt_flags = 0, sb_flags;
3728 int ret;
3729
3730 /* Discard magic */
3731 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3732 flags &= ~MS_MGC_MSK;
3733
3734 /* Basic sanity checks */
3735 if (data_page)
3736 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3737
3738 if (flags & MS_NOUSER)
3739 return -EINVAL;
3740
3741 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3742 if (ret)
3743 return ret;
3744 if (!may_mount())
3745 return -EPERM;
3746 if (flags & SB_MANDLOCK)
3747 warn_mandlock();
3748
3749 /* Default to relatime unless overridden */
3750 if (!(flags & MS_NOATIME))
3751 mnt_flags |= MNT_RELATIME;
3752
3753 /* Separate the per-mountpoint flags */
3754 if (flags & MS_NOSUID)
3755 mnt_flags |= MNT_NOSUID;
3756 if (flags & MS_NODEV)
3757 mnt_flags |= MNT_NODEV;
3758 if (flags & MS_NOEXEC)
3759 mnt_flags |= MNT_NOEXEC;
3760 if (flags & MS_NOATIME)
3761 mnt_flags |= MNT_NOATIME;
3762 if (flags & MS_NODIRATIME)
3763 mnt_flags |= MNT_NODIRATIME;
3764 if (flags & MS_STRICTATIME)
3765 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3766 if (flags & MS_RDONLY)
3767 mnt_flags |= MNT_READONLY;
3768 if (flags & MS_NOSYMFOLLOW)
3769 mnt_flags |= MNT_NOSYMFOLLOW;
3770
3771 /* The default atime for remount is preservation */
3772 if ((flags & MS_REMOUNT) &&
3773 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3774 MS_STRICTATIME)) == 0)) {
3775 mnt_flags &= ~MNT_ATIME_MASK;
3776 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3777 }
3778
3779 sb_flags = flags & (SB_RDONLY |
3780 SB_SYNCHRONOUS |
3781 SB_MANDLOCK |
3782 SB_DIRSYNC |
3783 SB_SILENT |
3784 SB_POSIXACL |
3785 SB_LAZYTIME |
3786 SB_I_VERSION);
3787
3788 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3789 return do_reconfigure_mnt(path, mnt_flags);
3790 if (flags & MS_REMOUNT)
3791 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3792 if (flags & MS_BIND)
3793 return do_loopback(path, dev_name, flags & MS_REC);
3794 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3795 return do_change_type(path, flags);
3796 if (flags & MS_MOVE)
3797 return do_move_mount_old(path, dev_name);
3798
3799 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3800 data_page);
3801 }
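/*
 * Illustrative userspace view of the dispatch above (a sketch, not part
 * of the kernel build): the single mount(2) entry point multiplexes
 * several distinct operations depending on the flags passed.
 *
 *	mount("/dev/sda1", "/mnt", "ext4", MS_RDONLY, NULL);  // new mount
 *	mount(NULL, "/mnt", NULL,
 *	      MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);  // reconfigure mnt
 *	mount("/mnt", "/mnt2", NULL, MS_BIND, NULL);           // loopback
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);    // propagation type
 *	mount("/mnt2", "/mnt3", NULL, MS_MOVE, NULL);        // move mount
 */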
3802
3803 long do_mount(const char *dev_name, const char __user *dir_name,
3804 const char *type_page, unsigned long flags, void *data_page)
3805 {
3806 struct path path;
3807 int ret;
3808
3809 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3810 if (ret)
3811 return ret;
3812 ret = path_mount(dev_name, &path, type_page, flags, data_page);
3813 path_put(&path);
3814 return ret;
3815 }
3816
3817 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3818 {
3819 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3820 }
3821
3822 static void dec_mnt_namespaces(struct ucounts *ucounts)
3823 {
3824 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3825 }
3826
3827 static void free_mnt_ns(struct mnt_namespace *ns)
3828 {
3829 if (!is_anon_ns(ns))
3830 ns_free_inum(&ns->ns);
3831 dec_mnt_namespaces(ns->ucounts);
3832 mnt_ns_tree_remove(ns);
3833 }
3834
3835 /*
3836 * Assign a sequence number so we can detect when we attempt to bind
3837 * mount a reference to an older mount namespace into the current
3838 * mount namespace, preventing reference counting loops. Even at an
3839 * implausible creation rate of 10GHz a 64-bit counter takes over 58
3840 * years to wrap, so we can ignore the possibility.
3841 */
3842 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3843
3844 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3845 {
3846 struct mnt_namespace *new_ns;
3847 struct ucounts *ucounts;
3848 int ret;
3849
3850 ucounts = inc_mnt_namespaces(user_ns);
3851 if (!ucounts)
3852 return ERR_PTR(-ENOSPC);
3853
3854 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3855 if (!new_ns) {
3856 dec_mnt_namespaces(ucounts);
3857 return ERR_PTR(-ENOMEM);
3858 }
3859 if (!anon) {
3860 ret = ns_alloc_inum(&new_ns->ns);
3861 if (ret) {
3862 kfree(new_ns);
3863 dec_mnt_namespaces(ucounts);
3864 return ERR_PTR(ret);
3865 }
3866 }
3867 new_ns->ns.ops = &mntns_operations;
3868 if (!anon)
3869 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3870 refcount_set(&new_ns->ns.count, 1);
3871 refcount_set(&new_ns->passive, 1);
3872 new_ns->mounts = RB_ROOT;
3873 RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
3874 init_waitqueue_head(&new_ns->poll);
3875 new_ns->user_ns = get_user_ns(user_ns);
3876 new_ns->ucounts = ucounts;
3877 return new_ns;
3878 }
3879
3880 __latent_entropy
3881 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3882 struct user_namespace *user_ns, struct fs_struct *new_fs)
3883 {
3884 struct mnt_namespace *new_ns;
3885 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3886 struct mount *p, *q;
3887 struct mount *old;
3888 struct mount *new;
3889 int copy_flags;
3890
3891 BUG_ON(!ns);
3892
3893 if (likely(!(flags & CLONE_NEWNS))) {
3894 get_mnt_ns(ns);
3895 return ns;
3896 }
3897
3898 old = ns->root;
3899
3900 new_ns = alloc_mnt_ns(user_ns, false);
3901 if (IS_ERR(new_ns))
3902 return new_ns;
3903
3904 namespace_lock();
3905 /* First pass: copy the tree topology */
3906 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3907 if (user_ns != ns->user_ns)
3908 copy_flags |= CL_SHARED_TO_SLAVE;
3909 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3910 if (IS_ERR(new)) {
3911 namespace_unlock();
3912 free_mnt_ns(new_ns);
3913 return ERR_CAST(new);
3914 }
3915 if (user_ns != ns->user_ns) {
3916 lock_mount_hash();
3917 lock_mnt_tree(new);
3918 unlock_mount_hash();
3919 }
3920 new_ns->root = new;
3921
3922 /*
3923 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3924 * as belonging to new namespace. We have already acquired a private
3925 * fs_struct, so tsk->fs->lock is not needed.
3926 */
3927 p = old;
3928 q = new;
3929 while (p) {
3930 mnt_add_to_ns(new_ns, q);
3931 new_ns->nr_mounts++;
3932 if (new_fs) {
3933 if (&p->mnt == new_fs->root.mnt) {
3934 new_fs->root.mnt = mntget(&q->mnt);
3935 rootmnt = &p->mnt;
3936 }
3937 if (&p->mnt == new_fs->pwd.mnt) {
3938 new_fs->pwd.mnt = mntget(&q->mnt);
3939 pwdmnt = &p->mnt;
3940 }
3941 }
3942 p = next_mnt(p, old);
3943 q = next_mnt(q, new);
3944 if (!q)
3945 break;
3946 // an mntns binding we'd skipped?
3947 while (p->mnt.mnt_root != q->mnt.mnt_root)
3948 p = next_mnt(skip_mnt_tree(p), old);
3949 }
3950 mnt_ns_tree_add(new_ns);
3951 namespace_unlock();
3952
3953 if (rootmnt)
3954 mntput(rootmnt);
3955 if (pwdmnt)
3956 mntput(pwdmnt);
3957
3958 return new_ns;
3959 }
3960
3961 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3962 {
3963 struct mount *mnt = real_mount(m);
3964 struct mnt_namespace *ns;
3965 struct super_block *s;
3966 struct path path;
3967 int err;
3968
3969 ns = alloc_mnt_ns(&init_user_ns, true);
3970 if (IS_ERR(ns)) {
3971 mntput(m);
3972 return ERR_CAST(ns);
3973 }
3974 ns->root = mnt;
3975 ns->nr_mounts++;
3976 mnt_add_to_ns(ns, mnt);
3977
3978 err = vfs_path_lookup(m->mnt_root, m,
3979 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3980
3981 put_mnt_ns(ns);
3982
3983 if (err)
3984 return ERR_PTR(err);
3985
3986 /* trade a vfsmount reference for active sb one */
3987 s = path.mnt->mnt_sb;
3988 atomic_inc(&s->s_active);
3989 mntput(path.mnt);
3990 /* lock the sucker */
3991 down_write(&s->s_umount);
3992 /* ... and return the root of (sub)tree on it */
3993 return path.dentry;
3994 }
3995 EXPORT_SYMBOL(mount_subtree);
3996
3997 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3998 char __user *, type, unsigned long, flags, void __user *, data)
3999 {
4000 int ret;
4001 char *kernel_type;
4002 char *kernel_dev;
4003 void *options;
4004
4005 kernel_type = copy_mount_string(type);
4006 ret = PTR_ERR(kernel_type);
4007 if (IS_ERR(kernel_type))
4008 goto out_type;
4009
4010 kernel_dev = copy_mount_string(dev_name);
4011 ret = PTR_ERR(kernel_dev);
4012 if (IS_ERR(kernel_dev))
4013 goto out_dev;
4014
4015 options = copy_mount_options(data);
4016 ret = PTR_ERR(options);
4017 if (IS_ERR(options))
4018 goto out_data;
4019
4020 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
4021
4022 kfree(options);
4023 out_data:
4024 kfree(kernel_dev);
4025 out_dev:
4026 kfree(kernel_type);
4027 out_type:
4028 return ret;
4029 }
4030
4031 #define FSMOUNT_VALID_FLAGS \
4032 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
4033 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
4034 MOUNT_ATTR_NOSYMFOLLOW)
4035
4036 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
4037
4038 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
4039 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
4040
4041 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
4042 {
4043 unsigned int mnt_flags = 0;
4044
4045 if (attr_flags & MOUNT_ATTR_RDONLY)
4046 mnt_flags |= MNT_READONLY;
4047 if (attr_flags & MOUNT_ATTR_NOSUID)
4048 mnt_flags |= MNT_NOSUID;
4049 if (attr_flags & MOUNT_ATTR_NODEV)
4050 mnt_flags |= MNT_NODEV;
4051 if (attr_flags & MOUNT_ATTR_NOEXEC)
4052 mnt_flags |= MNT_NOEXEC;
4053 if (attr_flags & MOUNT_ATTR_NODIRATIME)
4054 mnt_flags |= MNT_NODIRATIME;
4055 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
4056 mnt_flags |= MNT_NOSYMFOLLOW;
4057
4058 return mnt_flags;
4059 }
4060
4061 /*
4062 * Create a kernel mount representation for a new, prepared superblock
4063 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
4064 */
4065 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
4066 unsigned int, attr_flags)
4067 {
4068 struct mnt_namespace *ns;
4069 struct fs_context *fc;
4070 struct file *file;
4071 struct path newmount;
4072 struct mount *mnt;
4073 struct fd f;
4074 unsigned int mnt_flags = 0;
4075 long ret;
4076
4077 if (!may_mount())
4078 return -EPERM;
4079
4080 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
4081 return -EINVAL;
4082
4083 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
4084 return -EINVAL;
4085
4086 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
4087
4088 switch (attr_flags & MOUNT_ATTR__ATIME) {
4089 case MOUNT_ATTR_STRICTATIME:
4090 break;
4091 case MOUNT_ATTR_NOATIME:
4092 mnt_flags |= MNT_NOATIME;
4093 break;
4094 case MOUNT_ATTR_RELATIME:
4095 mnt_flags |= MNT_RELATIME;
4096 break;
4097 default:
4098 return -EINVAL;
4099 }
4100
4101 f = fdget(fs_fd);
4102 if (!f.file)
4103 return -EBADF;
4104
4105 ret = -EINVAL;
4106 if (f.file->f_op != &fscontext_fops)
4107 goto err_fsfd;
4108
4109 fc = f.file->private_data;
4110
4111 ret = mutex_lock_interruptible(&fc->uapi_mutex);
4112 if (ret < 0)
4113 goto err_fsfd;
4114
4115 /* There must be a valid superblock or we can't mount it */
4116 ret = -EINVAL;
4117 if (!fc->root)
4118 goto err_unlock;
4119
4120 ret = -EPERM;
4121 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
4122 pr_warn("VFS: Mount too revealing\n");
4123 goto err_unlock;
4124 }
4125
4126 ret = -EBUSY;
4127 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
4128 goto err_unlock;
4129
4130 if (fc->sb_flags & SB_MANDLOCK)
4131 warn_mandlock();
4132
4133 newmount.mnt = vfs_create_mount(fc);
4134 if (IS_ERR(newmount.mnt)) {
4135 ret = PTR_ERR(newmount.mnt);
4136 goto err_unlock;
4137 }
4138 newmount.dentry = dget(fc->root);
4139 newmount.mnt->mnt_flags = mnt_flags;
4140
4141 /* We've done the mount bit - now move the file context into more or
4142 * less the same state as if we'd done an fspick(). We don't want to
4143 * do any memory allocation or anything like that at this point as we
4144 * don't want to have to handle any errors incurred.
4145 */
4146 vfs_clean_context(fc);
4147
4148 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
4149 if (IS_ERR(ns)) {
4150 ret = PTR_ERR(ns);
4151 goto err_path;
4152 }
4153 mnt = real_mount(newmount.mnt);
4154 ns->root = mnt;
4155 ns->nr_mounts = 1;
4156 mnt_add_to_ns(ns, mnt);
4157 mntget(newmount.mnt);
4158
4159 /* Attach to an apparent O_PATH fd with a note that we need to unmount
4160 * it, not just simply put it.
4161 */
4162 file = dentry_open(&newmount, O_PATH, fc->cred);
4163 if (IS_ERR(file)) {
4164 dissolve_on_fput(newmount.mnt);
4165 ret = PTR_ERR(file);
4166 goto err_path;
4167 }
4168 file->f_mode |= FMODE_NEED_UNMOUNT;
4169
4170 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
4171 if (ret >= 0)
4172 fd_install(ret, file);
4173 else
4174 fput(file);
4175
4176 err_path:
4177 path_put(&newmount);
4178 err_unlock:
4179 mutex_unlock(&fc->uapi_mutex);
4180 err_fsfd:
4181 fdput(f);
4182 return ret;
4183 }
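/*
 * Illustrative userspace sequence for the new mount API (a sketch,
 * error handling omitted): fsmount() turns a configured fs_context into
 * a detached mount attached to a file descriptor, which move_mount()
 * can then install somewhere in the tree.
 *
 *	int fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_SET_FLAG, "ro", NULL, 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NOSUID);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */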
4184
4185 /*
4186 * Move a mount from one place to another. In combination with
4187 * fsopen()/fsmount() this is used to install a new mount and in combination
4188 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4189 * a mount subtree.
4190 *
4191 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4192 */
4193 SYSCALL_DEFINE5(move_mount,
4194 int, from_dfd, const char __user *, from_pathname,
4195 int, to_dfd, const char __user *, to_pathname,
4196 unsigned int, flags)
4197 {
4198 struct path from_path, to_path;
4199 unsigned int lflags;
4200 int ret = 0;
4201
4202 if (!may_mount())
4203 return -EPERM;
4204
4205 if (flags & ~MOVE_MOUNT__MASK)
4206 return -EINVAL;
4207
4208 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
4209 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
4210 return -EINVAL;
4211
4212 /* If someone gives a pathname, they aren't permitted to move
4213 * from an fd that requires unmount as we can't get at the flag
4214 * to clear it afterwards.
4215 */
4216 lflags = 0;
4217 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4218 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4219 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4220
4221 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
4222 if (ret < 0)
4223 return ret;
4224
4225 lflags = 0;
4226 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
4227 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
4228 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
4229
4230 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
4231 if (ret < 0)
4232 goto out_from;
4233
4234 ret = security_move_mount(&from_path, &to_path);
4235 if (ret < 0)
4236 goto out_to;
4237
4238 if (flags & MOVE_MOUNT_SET_GROUP)
4239 ret = do_set_group(&from_path, &to_path);
4240 else
4241 ret = do_move_mount(&from_path, &to_path,
4242 (flags & MOVE_MOUNT_BENEATH));
4243
4244 out_to:
4245 path_put(&to_path);
4246 out_from:
4247 path_put(&from_path);
4248 return ret;
4249 }
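/*
 * Illustrative sketch of the subtree-copy use mentioned above (error
 * handling omitted): open_tree(OPEN_TREE_CLONE | AT_RECURSIVE) yields a
 * detached copy of an existing mount tree that move_mount() can then
 * attach elsewhere.
 *
 *	int tfd = open_tree(AT_FDCWD, "/mnt/src",
 *			    OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	move_mount(tfd, "", AT_FDCWD, "/mnt/dst", MOVE_MOUNT_F_EMPTY_PATH);
 */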
4250
4251 /*
4252 * Return true if path is reachable from root
4253 *
4254 * namespace_sem or mount_lock is held
4255 */
4256 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4257 const struct path *root)
4258 {
4259 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4260 dentry = mnt->mnt_mountpoint;
4261 mnt = mnt->mnt_parent;
4262 }
4263 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4264 }
4265
4266 bool path_is_under(const struct path *path1, const struct path *path2)
4267 {
4268 bool res;
4269 read_seqlock_excl(&mount_lock);
4270 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4271 read_sequnlock_excl(&mount_lock);
4272 return res;
4273 }
4274 EXPORT_SYMBOL(path_is_under);
4275
4276 /*
4277 * pivot_root Semantics:
4278 * Moves the root file system of the current process to the directory put_old,
4279 * makes new_root as the new root file system of the current process, and sets
4280 * root/cwd of all processes which had them on the current root to new_root.
4281 *
4282 * Restrictions:
4283 * The new_root and put_old must be directories, and must not be on the
4284 * same file system as the current process root. The put_old must be
4285 * underneath new_root, i.e. adding a non-zero number of /.. to the string
4286 * pointed to by put_old must yield the same directory as new_root. No other
4287 * file system may be mounted on put_old. After all, new_root is a mountpoint.
4288 *
4289 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
4290 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
4291 * in this situation.
4292 *
4293 * Notes:
4294 * - we don't move root/cwd if they are not at the root (reason: if something
4295 * cared enough to change them, it's probably wrong to force them elsewhere)
4296 * - it's okay to pick a root that isn't the root of a file system, e.g.
4297 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
4298 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
4299 * first.
4300 */
4301 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4302 const char __user *, put_old)
4303 {
4304 struct path new, old, root;
4305 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4306 struct mountpoint *old_mp, *root_mp;
4307 int error;
4308
4309 if (!may_mount())
4310 return -EPERM;
4311
4312 error = user_path_at(AT_FDCWD, new_root,
4313 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4314 if (error)
4315 goto out0;
4316
4317 error = user_path_at(AT_FDCWD, put_old,
4318 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4319 if (error)
4320 goto out1;
4321
4322 error = security_sb_pivotroot(&old, &new);
4323 if (error)
4324 goto out2;
4325
4326 get_fs_root(current->fs, &root);
4327 old_mp = lock_mount(&old);
4328 error = PTR_ERR(old_mp);
4329 if (IS_ERR(old_mp))
4330 goto out3;
4331
4332 error = -EINVAL;
4333 new_mnt = real_mount(new.mnt);
4334 root_mnt = real_mount(root.mnt);
4335 old_mnt = real_mount(old.mnt);
4336 ex_parent = new_mnt->mnt_parent;
4337 root_parent = root_mnt->mnt_parent;
4338 if (IS_MNT_SHARED(old_mnt) ||
4339 IS_MNT_SHARED(ex_parent) ||
4340 IS_MNT_SHARED(root_parent))
4341 goto out4;
4342 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4343 goto out4;
4344 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4345 goto out4;
4346 error = -ENOENT;
4347 if (d_unlinked(new.dentry))
4348 goto out4;
4349 error = -EBUSY;
4350 if (new_mnt == root_mnt || old_mnt == root_mnt)
4351 goto out4; /* loop, on the same file system */
4352 error = -EINVAL;
4353 if (!path_mounted(&root))
4354 goto out4; /* not a mountpoint */
4355 if (!mnt_has_parent(root_mnt))
4356 goto out4; /* not attached */
4357 if (!path_mounted(&new))
4358 goto out4; /* not a mountpoint */
4359 if (!mnt_has_parent(new_mnt))
4360 goto out4; /* not attached */
4361 /* make sure we can reach put_old from new_root */
4362 if (!is_path_reachable(old_mnt, old.dentry, &new))
4363 goto out4;
4364 /* make certain new is below the root */
4365 if (!is_path_reachable(new_mnt, new.dentry, &root))
4366 goto out4;
4367 lock_mount_hash();
4368 umount_mnt(new_mnt);
4369 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
4370 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4371 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4372 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4373 }
4374 /* mount old root on put_old */
4375 attach_mnt(root_mnt, old_mnt, old_mp, false);
4376 /* mount new_root on / */
4377 attach_mnt(new_mnt, root_parent, root_mp, false);
4378 mnt_add_count(root_parent, -1);
4379 touch_mnt_namespace(current->nsproxy->mnt_ns);
4380 /* A moved mount should not expire automatically */
4381 list_del_init(&new_mnt->mnt_expire);
4382 put_mountpoint(root_mp);
4383 unlock_mount_hash();
4384 chroot_fs_refs(&root, &new);
4385 error = 0;
4386 out4:
4387 unlock_mount(old_mp);
4388 if (!error)
4389 mntput_no_expire(ex_parent);
4390 out3:
4391 path_put(&root);
4392 out2:
4393 path_put(&old);
4394 out1:
4395 path_put(&new);
4396 out0:
4397 return error;
4398 }
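/*
 * Illustrative container-setup sketch for the semantics above (error
 * handling omitted). new_root must itself be a mount point, hence the
 * initial bind mount; the old root ends up stacked underneath and is
 * then lazily detached.
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND | MS_REC, NULL);
 *	chdir("/newroot");
 *	pivot_root(".", ".");
 *	umount2(".", MNT_DETACH);
 *	chdir("/");
 */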
4399
4400 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4401 {
4402 unsigned int flags = mnt->mnt.mnt_flags;
4403
4404 /* flags to clear */
4405 flags &= ~kattr->attr_clr;
4406 /* flags to raise */
4407 flags |= kattr->attr_set;
4408
4409 return flags;
4410 }
4411
4412 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4413 {
4414 struct vfsmount *m = &mnt->mnt;
4415 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4416
4417 if (!kattr->mnt_idmap)
4418 return 0;
4419
4420 /*
4421 * Creating an idmapped mount with the filesystem wide idmapping
4422 * doesn't make sense so block that. We don't allow mushy semantics.
4423 */
4424 if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
4425 return -EINVAL;
4426
4427 /*
4428 * Once a mount has been idmapped we don't allow it to change its
4429 * mapping. It makes things simpler and callers can just create
4430 * another bind-mount they can idmap if they want to.
4431 */
4432 if (is_idmapped_mnt(m))
4433 return -EPERM;
4434
4435 /* The underlying filesystem doesn't support idmapped mounts yet. */
4436 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4437 return -EINVAL;
4438
4439 /* We're not controlling the superblock. */
4440 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4441 return -EPERM;
4442
4443 /* Mount has already been visible in the filesystem hierarchy. */
4444 if (!is_anon_ns(mnt->mnt_ns))
4445 return -EINVAL;
4446
4447 return 0;
4448 }
4449
4450 /**
4451 * mnt_allow_writers() - check whether the attribute change allows writers
4452 * @kattr: the new mount attributes
4453 * @mnt: the mount to which @kattr will be applied
4454 *
4455 * Check whether the new mount attributes in @kattr allow concurrent writers.
4456 *
4457 * Return: true if writers need to be held, false if not
4458 */
4459 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4460 const struct mount *mnt)
4461 {
4462 return (!(kattr->attr_set & MNT_READONLY) ||
4463 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4464 !kattr->mnt_idmap;
4465 }
4466
4467 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4468 {
4469 struct mount *m;
4470 int err;
4471
4472 for (m = mnt; m; m = next_mnt(m, mnt)) {
4473 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4474 err = -EPERM;
4475 break;
4476 }
4477
4478 err = can_idmap_mount(kattr, m);
4479 if (err)
4480 break;
4481
4482 if (!mnt_allow_writers(kattr, m)) {
4483 err = mnt_hold_writers(m);
4484 if (err)
4485 break;
4486 }
4487
4488 if (!kattr->recurse)
4489 return 0;
4490 }
4491
4492 if (err) {
4493 struct mount *p;
4494
4495 /*
4496 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4497 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4498 * mounts and needs to take care to include the first mount.
4499 */
4500 for (p = mnt; p; p = next_mnt(p, mnt)) {
4501 /* If we had to hold writers unblock them. */
4502 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4503 mnt_unhold_writers(p);
4504
4505 /*
4506 * We're done once the first mount we changed got
4507 * MNT_WRITE_HOLD unset.
4508 */
4509 if (p == m)
4510 break;
4511 }
4512 }
4513 return err;
4514 }
4515
4516 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4517 {
4518 if (!kattr->mnt_idmap)
4519 return;
4520
4521 /*
4522 * Pairs with smp_load_acquire() in mnt_idmap().
4523 *
4524 * Since we only allow a mount to change the idmapping once and
4525 * verified this in can_idmap_mount() we know that the mount has
4526 * @nop_mnt_idmap attached to it. So there's no need to drop any
4527 * references.
4528 */
4529 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4530 }
4531
4532 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4533 {
4534 struct mount *m;
4535
4536 for (m = mnt; m; m = next_mnt(m, mnt)) {
4537 unsigned int flags;
4538
4539 do_idmap_mount(kattr, m);
4540 flags = recalc_flags(kattr, m);
4541 WRITE_ONCE(m->mnt.mnt_flags, flags);
4542
4543 /* If we had to hold writers unblock them. */
4544 if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4545 mnt_unhold_writers(m);
4546
4547 if (kattr->propagation)
4548 change_mnt_propagation(m, kattr->propagation);
4549 if (!kattr->recurse)
4550 break;
4551 }
4552 touch_mnt_namespace(mnt->mnt_ns);
4553 }
4554
4555 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4556 {
4557 struct mount *mnt = real_mount(path->mnt);
4558 int err = 0;
4559
4560 if (!path_mounted(path))
4561 return -EINVAL;
4562
4563 if (kattr->mnt_userns) {
4564 struct mnt_idmap *mnt_idmap;
4565
4566 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4567 if (IS_ERR(mnt_idmap))
4568 return PTR_ERR(mnt_idmap);
4569 kattr->mnt_idmap = mnt_idmap;
4570 }
4571
4572 if (kattr->propagation) {
4573 /*
4574 * Only take namespace_lock() if we're actually changing
4575 * propagation.
4576 */
4577 namespace_lock();
4578 if (kattr->propagation == MS_SHARED) {
4579 err = invent_group_ids(mnt, kattr->recurse);
4580 if (err) {
4581 namespace_unlock();
4582 return err;
4583 }
4584 }
4585 }
4586
4587 err = -EINVAL;
4588 lock_mount_hash();
4589
4590 /* Ensure that this isn't anything purely vfs internal. */
4591 if (!is_mounted(&mnt->mnt))
4592 goto out;
4593
4594 /*
4595 * If this is an attached mount make sure it's located in the caller's
4596 * mount namespace. If it's not, don't let the caller interact with it.
4597 *
4598 * If this mount doesn't have a parent it's most often simply a
4599 * detached mount with an anonymous mount namespace. IOW, something
4600 * that's simply not attached yet. But there are apparently also users
4601 * that do change mount properties on the rootfs itself. That obviously
4602 * neither has a parent nor is it a detached mount so we cannot
4603 * unconditionally check for detached mounts.
4604 */
4605 if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
4606 goto out;
4607
4608 /*
4609 * First, we get the mount tree in a shape where we can change mount
4610 * properties without failure. If we succeed we commit all changes,
4611 * and if we fail we clean up.
4612 */
4613 err = mount_setattr_prepare(kattr, mnt);
4614 if (!err)
4615 mount_setattr_commit(kattr, mnt);
4616
4617 out:
4618 unlock_mount_hash();
4619
4620 if (kattr->propagation) {
4621 if (err)
4622 cleanup_group_ids(mnt, NULL);
4623 namespace_unlock();
4624 }
4625
4626 return err;
4627 }
4628
4629 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4630 struct mount_kattr *kattr, unsigned int flags)
4631 {
4632 int err = 0;
4633 struct ns_common *ns;
4634 struct user_namespace *mnt_userns;
4635 struct fd f;
4636
4637 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4638 return 0;
4639
4640 /*
4641 * We currently do not support clearing an idmapped mount. If this ever
4642 * is a use-case we can revisit this but for now let's keep it simple
4643 * and not allow it.
4644 */
4645 if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4646 return -EINVAL;
4647
4648 if (attr->userns_fd > INT_MAX)
4649 return -EINVAL;
4650
4651 f = fdget(attr->userns_fd);
4652 if (!f.file)
4653 return -EBADF;
4654
4655 if (!proc_ns_file(f.file)) {
4656 err = -EINVAL;
4657 goto out_fput;
4658 }
4659
4660 ns = get_proc_ns(file_inode(f.file));
4661 if (ns->ops->type != CLONE_NEWUSER) {
4662 err = -EINVAL;
4663 goto out_fput;
4664 }
4665
4666 /*
4667 * The initial idmapping cannot be used to create an idmapped
4668 * mount. We use the initial idmapping as an indicator of a mount
4669 * that is not idmapped. It can simply be passed into helpers that
4670 * are aware of idmapped mounts as a convenient shortcut. A user
4671 * can just create a dedicated identity mapping to achieve the same
4672 * result.
4673 */
4674 mnt_userns = container_of(ns, struct user_namespace, ns);
4675 if (mnt_userns == &init_user_ns) {
4676 err = -EPERM;
4677 goto out_fput;
4678 }
4679
4680 /* We're not controlling the target namespace. */
4681 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4682 err = -EPERM;
4683 goto out_fput;
4684 }
4685
4686 kattr->mnt_userns = get_user_ns(mnt_userns);
4687
4688 out_fput:
4689 fdput(f);
4690 return err;
4691 }
4692
4693 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4694 struct mount_kattr *kattr, unsigned int flags)
4695 {
4696 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4697
4698 if (flags & AT_NO_AUTOMOUNT)
4699 lookup_flags &= ~LOOKUP_AUTOMOUNT;
4700 if (flags & AT_SYMLINK_NOFOLLOW)
4701 lookup_flags &= ~LOOKUP_FOLLOW;
4702 if (flags & AT_EMPTY_PATH)
4703 lookup_flags |= LOOKUP_EMPTY;
4704
4705 *kattr = (struct mount_kattr) {
4706 .lookup_flags = lookup_flags,
4707 .recurse = !!(flags & AT_RECURSIVE),
4708 };
4709
4710 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4711 return -EINVAL;
4712 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4713 return -EINVAL;
4714 kattr->propagation = attr->propagation;
4715
4716 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4717 return -EINVAL;
4718
4719 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4720 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4721
4722 /*
4723 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4724 * users wanting to transition to a different atime setting cannot
4725 * simply specify the atime setting in @attr_set, but must also
4726 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4727 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4728 * @attr_clr and that @attr_set can't have any atime bits set if
4729 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4730 */
4731 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4732 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4733 return -EINVAL;
4734
4735 /*
4736 * Clear all previous time settings as they are mutually
4737 * exclusive.
4738 */
4739 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4740 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4741 case MOUNT_ATTR_RELATIME:
4742 kattr->attr_set |= MNT_RELATIME;
4743 break;
4744 case MOUNT_ATTR_NOATIME:
4745 kattr->attr_set |= MNT_NOATIME;
4746 break;
4747 case MOUNT_ATTR_STRICTATIME:
4748 break;
4749 default:
4750 return -EINVAL;
4751 }
4752 } else {
4753 if (attr->attr_set & MOUNT_ATTR__ATIME)
4754 return -EINVAL;
4755 }
4756
4757 return build_mount_idmapped(attr, usize, kattr, flags);
4758 }
4759
4760 static void finish_mount_kattr(struct mount_kattr *kattr)
4761 {
4762 put_user_ns(kattr->mnt_userns);
4763 kattr->mnt_userns = NULL;
4764
4765 if (kattr->mnt_idmap)
4766 mnt_idmap_put(kattr->mnt_idmap);
4767 }
4768
4769 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4770 unsigned int, flags, struct mount_attr __user *, uattr,
4771 size_t, usize)
4772 {
4773 int err;
4774 struct path target;
4775 struct mount_attr attr;
4776 struct mount_kattr kattr;
4777
4778 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4779
4780 if (flags & ~(AT_EMPTY_PATH |
4781 AT_RECURSIVE |
4782 AT_SYMLINK_NOFOLLOW |
4783 AT_NO_AUTOMOUNT))
4784 return -EINVAL;
4785
4786 if (unlikely(usize > PAGE_SIZE))
4787 return -E2BIG;
4788 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4789 return -EINVAL;
4790
4791 if (!may_mount())
4792 return -EPERM;
4793
4794 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4795 if (err)
4796 return err;
4797
4798 /* Don't bother walking through the mounts if this is a nop. */
4799 if (attr.attr_set == 0 &&
4800 attr.attr_clr == 0 &&
4801 attr.propagation == 0)
4802 return 0;
4803
4804 err = build_mount_kattr(&attr, usize, &kattr, flags);
4805 if (err)
4806 return err;
4807
4808 err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4809 if (!err) {
4810 err = do_mount_setattr(&target, &kattr);
4811 path_put(&target);
4812 }
4813 finish_mount_kattr(&kattr);
4814 return err;
4815 }
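/*
 * Illustrative userspace sketch for mount_setattr(2) (error handling
 * omitted): recursively make a subtree read-only and forbid following
 * symlinks on it. An idmapped mount would additionally set
 * MOUNT_ATTR_IDMAP in attr_set and pass a user namespace fd in
 * attr.userns_fd.
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSYMFOLLOW,
 *	};
 *	mount_setattr(AT_FDCWD, "/mnt", AT_RECURSIVE, &attr, sizeof(attr));
 */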
4816
4817 int show_path(struct seq_file *m, struct dentry *root)
4818 {
4819 if (root->d_sb->s_op->show_path)
4820 return root->d_sb->s_op->show_path(m, root);
4821
4822 seq_dentry(m, root, " \t\n\\");
4823 return 0;
4824 }
4825
4826 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
4827 {
4828 struct mount *mnt = mnt_find_id_at(ns, id);
4829
4830 if (!mnt || mnt->mnt_id_unique != id)
4831 return NULL;
4832
4833 return &mnt->mnt;
4834 }
4835
4836 struct kstatmount {
4837 struct statmount __user *buf;
4838 size_t bufsize;
4839 struct vfsmount *mnt;
4840 u64 mask;
4841 struct path root;
4842 struct statmount sm;
4843 struct seq_file seq;
4844 };
4845
4846 static u64 mnt_to_attr_flags(struct vfsmount *mnt)
4847 {
4848 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
4849 u64 attr_flags = 0;
4850
4851 if (mnt_flags & MNT_READONLY)
4852 attr_flags |= MOUNT_ATTR_RDONLY;
4853 if (mnt_flags & MNT_NOSUID)
4854 attr_flags |= MOUNT_ATTR_NOSUID;
4855 if (mnt_flags & MNT_NODEV)
4856 attr_flags |= MOUNT_ATTR_NODEV;
4857 if (mnt_flags & MNT_NOEXEC)
4858 attr_flags |= MOUNT_ATTR_NOEXEC;
4859 if (mnt_flags & MNT_NODIRATIME)
4860 attr_flags |= MOUNT_ATTR_NODIRATIME;
4861 if (mnt_flags & MNT_NOSYMFOLLOW)
4862 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
4863
4864 if (mnt_flags & MNT_NOATIME)
4865 attr_flags |= MOUNT_ATTR_NOATIME;
4866 else if (mnt_flags & MNT_RELATIME)
4867 attr_flags |= MOUNT_ATTR_RELATIME;
4868 else
4869 attr_flags |= MOUNT_ATTR_STRICTATIME;
4870
4871 if (is_idmapped_mnt(mnt))
4872 attr_flags |= MOUNT_ATTR_IDMAP;
4873
4874 return attr_flags;
4875 }
4876
4877 static u64 mnt_to_propagation_flags(struct mount *m)
4878 {
4879 u64 propagation = 0;
4880
4881 if (IS_MNT_SHARED(m))
4882 propagation |= MS_SHARED;
4883 if (IS_MNT_SLAVE(m))
4884 propagation |= MS_SLAVE;
4885 if (IS_MNT_UNBINDABLE(m))
4886 propagation |= MS_UNBINDABLE;
4887 if (!propagation)
4888 propagation |= MS_PRIVATE;
4889
4890 return propagation;
4891 }
4892
4893 static void statmount_sb_basic(struct kstatmount *s)
4894 {
4895 struct super_block *sb = s->mnt->mnt_sb;
4896
4897 s->sm.mask |= STATMOUNT_SB_BASIC;
4898 s->sm.sb_dev_major = MAJOR(sb->s_dev);
4899 s->sm.sb_dev_minor = MINOR(sb->s_dev);
4900 s->sm.sb_magic = sb->s_magic;
4901 s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
4902 }
4903
4904 static void statmount_mnt_basic(struct kstatmount *s)
4905 {
4906 struct mount *m = real_mount(s->mnt);
4907
4908 s->sm.mask |= STATMOUNT_MNT_BASIC;
4909 s->sm.mnt_id = m->mnt_id_unique;
4910 s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
4911 s->sm.mnt_id_old = m->mnt_id;
4912 s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
4913 s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
4914 s->sm.mnt_propagation = mnt_to_propagation_flags(m);
4915 s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
4916 s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
4917 }
4918
4919 static void statmount_propagate_from(struct kstatmount *s)
4920 {
4921 struct mount *m = real_mount(s->mnt);
4922
4923 s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
4924 if (IS_MNT_SLAVE(m))
4925 s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
4926 }
4927
4928 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
4929 {
4930 int ret;
4931 size_t start = seq->count;
4932
4933 ret = show_path(seq, s->mnt->mnt_root);
4934 if (ret)
4935 return ret;
4936
4937 if (unlikely(seq_has_overflowed(seq)))
4938 return -EAGAIN;
4939
4940 /*
4941 * Unescape the result. It would be better if the supplied string was not
4942 * escaped in the first place, but that's a pretty invasive change.
4943 */
4944 seq->buf[seq->count] = '\0';
4945 seq->count = start;
4946 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
4947 return 0;
4948 }
4949
4950 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
4951 {
4952 struct vfsmount *mnt = s->mnt;
4953 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
4954 int err;
4955
4956 err = seq_path_root(seq, &mnt_path, &s->root, "");
4957 return err == SEQ_SKIP ? 0 : err;
4958 }
4959
4960 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
4961 {
4962 struct super_block *sb = s->mnt->mnt_sb;
4963
4964 seq_puts(seq, sb->s_type->name);
4965 return 0;
4966 }
4967
4968 static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
4969 {
4970 s->sm.mask |= STATMOUNT_MNT_NS_ID;
4971 s->sm.mnt_ns_id = ns->seq;
4972 }
4973
4974 static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
4975 {
4976 struct vfsmount *mnt = s->mnt;
4977 struct super_block *sb = mnt->mnt_sb;
4978 int err;
4979
4980 if (sb->s_op->show_options) {
4981 size_t start = seq->count;
4982
4983 err = sb->s_op->show_options(seq, mnt->mnt_root);
4984 if (err)
4985 return err;
4986
4987 if (unlikely(seq_has_overflowed(seq)))
4988 return -EAGAIN;
4989
4990 if (seq->count == start)
4991 return 0;
4992
4993 /* skip leading comma */
4994 memmove(seq->buf + start, seq->buf + start + 1,
4995 seq->count - start - 1);
4996 seq->count--;
4997 }
4998
4999 return 0;
5000 }
5001
5002 static int statmount_string(struct kstatmount *s, u64 flag)
5003 {
5004 int ret;
5005 size_t kbufsize;
5006 struct seq_file *seq = &s->seq;
5007 struct statmount *sm = &s->sm;
5008
5009 switch (flag) {
5010 case STATMOUNT_FS_TYPE:
5011 sm->fs_type = seq->count;
5012 ret = statmount_fs_type(s, seq);
5013 break;
5014 case STATMOUNT_MNT_ROOT:
5015 sm->mnt_root = seq->count;
5016 ret = statmount_mnt_root(s, seq);
5017 break;
5018 case STATMOUNT_MNT_POINT:
5019 sm->mnt_point = seq->count;
5020 ret = statmount_mnt_point(s, seq);
5021 break;
5022 case STATMOUNT_MNT_OPTS:
5023 sm->mnt_opts = seq->count;
5024 ret = statmount_mnt_opts(s, seq);
5025 break;
5026 default:
5027 WARN_ON_ONCE(true);
5028 return -EINVAL;
5029 }
5030
5031 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
5032 return -EOVERFLOW;
5033 if (kbufsize >= s->bufsize)
5034 return -EOVERFLOW;
5035
5036 /* signal a retry */
5037 if (unlikely(seq_has_overflowed(seq)))
5038 return -EAGAIN;
5039
5040 if (ret)
5041 return ret;
5042
5043 seq->buf[seq->count++] = '\0';
5044 sm->mask |= flag;
5045 return 0;
5046 }
5047
5048 static int copy_statmount_to_user(struct kstatmount *s)
5049 {
5050 struct statmount *sm = &s->sm;
5051 struct seq_file *seq = &s->seq;
5052 char __user *str = ((char __user *)s->buf) + sizeof(*sm);
5053 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
5054
5055 if (seq->count && copy_to_user(str, seq->buf, seq->count))
5056 return -EFAULT;
5057
5058 /* Return the number of bytes copied to the buffer */
5059 sm->size = copysize + seq->count;
5060 if (copy_to_user(s->buf, sm, copysize))
5061 return -EFAULT;
5062
5063 return 0;
5064 }
5065
5066 static struct mount *listmnt_next(struct mount *curr, bool reverse)
5067 {
5068 struct rb_node *node;
5069
5070 if (reverse)
5071 node = rb_prev(&curr->mnt_node);
5072 else
5073 node = rb_next(&curr->mnt_node);
5074
5075 return node_to_mount(node);
5076 }
5077
5078 static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
5079 {
5080 struct mount *first, *child;
5081
5082 rwsem_assert_held(&namespace_sem);
5083
5084 /* We're looking at our own ns, just use get_fs_root. */
5085 if (ns == current->nsproxy->mnt_ns) {
5086 get_fs_root(current->fs, root);
5087 return 0;
5088 }
5089
5090 /*
5091 * We have to find the first mount in our ns and use that; however, it
5092 * may not exist, so handle that properly.
5093 */
5094 if (RB_EMPTY_ROOT(&ns->mounts))
5095 return -ENOENT;
5096
5097 first = child = ns->root;
5098 for (;;) {
5099 child = listmnt_next(child, false);
5100 if (!child)
5101 return -ENOENT;
5102 if (child->mnt_parent == first)
5103 break;
5104 }
5105
5106 root->mnt = mntget(&child->mnt);
5107 root->dentry = dget(root->mnt->mnt_root);
5108 return 0;
5109 }
5110
5111 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
5112 struct mnt_namespace *ns)
5113 {
5114 struct path root __free(path_put) = {};
5115 struct mount *m;
5116 int err;
5117
5118 /* Has the namespace already been emptied? */
5119 if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
5120 return -ENOENT;
5121
5122 s->mnt = lookup_mnt_in_ns(mnt_id, ns);
5123 if (!s->mnt)
5124 return -ENOENT;
5125
5126 err = grab_requested_root(ns, &root);
5127 if (err)
5128 return err;
5129
5130 /*
5131 * Don't trigger audit denials. We just want to determine what
5132 * mounts to show users.
5133 */
5134 m = real_mount(s->mnt);
5135 if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
5136 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5137 return -EPERM;
5138
5139 err = security_sb_statfs(s->mnt->mnt_root);
5140 if (err)
5141 return err;
5142
5143 s->root = root;
5144 if (s->mask & STATMOUNT_SB_BASIC)
5145 statmount_sb_basic(s);
5146
5147 if (s->mask & STATMOUNT_MNT_BASIC)
5148 statmount_mnt_basic(s);
5149
5150 if (s->mask & STATMOUNT_PROPAGATE_FROM)
5151 statmount_propagate_from(s);
5152
5153 if (s->mask & STATMOUNT_FS_TYPE)
5154 err = statmount_string(s, STATMOUNT_FS_TYPE);
5155
5156 if (!err && s->mask & STATMOUNT_MNT_ROOT)
5157 err = statmount_string(s, STATMOUNT_MNT_ROOT);
5158
5159 if (!err && s->mask & STATMOUNT_MNT_POINT)
5160 err = statmount_string(s, STATMOUNT_MNT_POINT);
5161
5162 if (!err && s->mask & STATMOUNT_MNT_OPTS)
5163 err = statmount_string(s, STATMOUNT_MNT_OPTS);
5164
5165 if (!err && s->mask & STATMOUNT_MNT_NS_ID)
5166 statmount_mnt_ns_id(s, ns);
5167
5168 if (err)
5169 return err;
5170
5171 return 0;
5172 }
5173
5174 static inline bool retry_statmount(const long ret, size_t *seq_size)
5175 {
5176 if (likely(ret != -EAGAIN))
5177 return false;
5178 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
5179 return false;
5180 if (unlikely(*seq_size > MAX_RW_COUNT))
5181 return false;
5182 return true;
5183 }
5184
5185 #define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
5186 STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS)
5187
5188 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
5189 struct statmount __user *buf, size_t bufsize,
5190 size_t seq_size)
5191 {
5192 if (!access_ok(buf, bufsize))
5193 return -EFAULT;
5194
5195 memset(ks, 0, sizeof(*ks));
5196 ks->mask = kreq->param;
5197 ks->buf = buf;
5198 ks->bufsize = bufsize;
5199
5200 if (ks->mask & STATMOUNT_STRING_REQ) {
5201 if (bufsize == sizeof(ks->sm))
5202 return -EOVERFLOW;
5203
5204 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
5205 if (!ks->seq.buf)
5206 return -ENOMEM;
5207
5208 ks->seq.size = seq_size;
5209 }
5210
5211 return 0;
5212 }
5213
5214 static int copy_mnt_id_req(const struct mnt_id_req __user *req,
5215 struct mnt_id_req *kreq)
5216 {
5217 int ret;
5218 size_t usize;
5219
5220 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);
5221
5222 ret = get_user(usize, &req->size);
5223 if (ret)
5224 return -EFAULT;
5225 if (unlikely(usize > PAGE_SIZE))
5226 return -E2BIG;
5227 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
5228 return -EINVAL;
5229 memset(kreq, 0, sizeof(*kreq));
5230 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
5231 if (ret)
5232 return ret;
5233 if (kreq->spare != 0)
5234 return -EINVAL;
5235 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
5236 if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
5237 return -EINVAL;
5238 return 0;
5239 }
5240
5241 /*
5242 * If the user requested a specific mount namespace id, look that up and return
5243 * that, or if not simply grab a passive reference on our mount namespace and
5244 * return that.
5245 */
5246 static struct mnt_namespace *grab_requested_mnt_ns(u64 mnt_ns_id)
5247 {
5248 if (mnt_ns_id)
5249 return lookup_mnt_ns(mnt_ns_id);
5250 refcount_inc(&current->nsproxy->mnt_ns->passive);
5251 return current->nsproxy->mnt_ns;
5252 }
5253
5254 SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
5255 struct statmount __user *, buf, size_t, bufsize,
5256 unsigned int, flags)
5257 {
5258 struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
5259 struct kstatmount *ks __free(kfree) = NULL;
5260 struct mnt_id_req kreq;
5261 /* We currently support retrieval of 3 strings. */
5262 size_t seq_size = 3 * PATH_MAX;
5263 int ret;
5264
5265 if (flags)
5266 return -EINVAL;
5267
5268 ret = copy_mnt_id_req(req, &kreq);
5269 if (ret)
5270 return ret;
5271
5272 ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
5273 if (!ns)
5274 return -ENOENT;
5275
5276 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
5277 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5278 return -ENOENT;
5279
5280 ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
5281 if (!ks)
5282 return -ENOMEM;
5283
5284 retry:
5285 ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
5286 if (ret)
5287 return ret;
5288
5289 scoped_guard(rwsem_read, &namespace_sem)
5290 ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);
5291
5292 if (!ret)
5293 ret = copy_statmount_to_user(ks);
5294 kvfree(ks->seq.buf);
5295 if (retry_statmount(ret, &seq_size))
5296 goto retry;
5297 return ret;
5298 }
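/*
 * Illustrative userspace sketch for statmount(2) (error handling and
 * retry-on--EAGAIN omitted): request the basic mount info plus the mount
 * point string for one unique mount ID. String fields such as mnt_point
 * are byte offsets into the string section that follows the fixed-size
 * header, mirroring copy_statmount_to_user() above.
 *
 *	char buf[4096];
 *	struct statmount *sm = (struct statmount *)buf;
 *	struct mnt_id_req req = {
 *		.size = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = id,	// a 64-bit unique mount ID from listmount()
 *		.param = STATMOUNT_MNT_BASIC | STATMOUNT_MNT_POINT,
 *	};
 *	statmount(&req, sm, sizeof(buf), 0);
 *	printf("%s\n", (char *)sm + sizeof(*sm) + sm->mnt_point);
 */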
5299
5300 static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
5301 u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
5302 bool reverse)
5303 {
5304 struct path root __free(path_put) = {};
5305 struct path orig;
5306 struct mount *r, *first;
5307 ssize_t ret;
5308
5309 rwsem_assert_held(&namespace_sem);
5310
5311 ret = grab_requested_root(ns, &root);
5312 if (ret)
5313 return ret;
5314
5315 if (mnt_parent_id == LSMT_ROOT) {
5316 orig = root;
5317 } else {
5318 orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
5319 if (!orig.mnt)
5320 return -ENOENT;
5321 orig.dentry = orig.mnt->mnt_root;
5322 }
5323
5324 /*
5325 * Don't trigger audit denials. We just want to determine what
5326 * mounts to show users.
5327 */
5328 if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
5329 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5330 return -EPERM;
5331
5332 ret = security_sb_statfs(orig.dentry);
5333 if (ret)
5334 return ret;
5335
5336 if (!last_mnt_id) {
5337 if (reverse)
5338 first = node_to_mount(rb_last(&ns->mounts));
5339 else
5340 first = node_to_mount(rb_first(&ns->mounts));
5341 } else {
5342 if (reverse)
5343 first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
5344 else
5345 first = mnt_find_id_at(ns, last_mnt_id + 1);
5346 }
5347
5348 for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
5349 if (r->mnt_id_unique == mnt_parent_id)
5350 continue;
5351 if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
5352 continue;
5353 *mnt_ids = r->mnt_id_unique;
5354 mnt_ids++;
5355 nr_mnt_ids--;
5356 ret++;
5357 }
5358 return ret;
5359 }
5360
5361 SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
5362 u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
5363 {
5364 u64 *kmnt_ids __free(kvfree) = NULL;
5365 const size_t maxcount = 1000000;
5366 struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
5367 struct mnt_id_req kreq;
5368 u64 last_mnt_id;
5369 ssize_t ret;
5370
5371 if (flags & ~LISTMOUNT_REVERSE)
5372 return -EINVAL;
5373
5374 /*
5375 * If the mount namespace really has more than 1 million mounts the
5376 * caller must iterate over the mount namespace (and reconsider their
5377 * system design...).
5378 */
5379 if (unlikely(nr_mnt_ids > maxcount))
5380 return -EOVERFLOW;
5381
5382 if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
5383 return -EFAULT;
5384
5385 ret = copy_mnt_id_req(req, &kreq);
5386 if (ret)
5387 return ret;
5388
5389 last_mnt_id = kreq.param;
5390 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
5391 if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
5392 return -EINVAL;
5393
5394 kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
5395 GFP_KERNEL_ACCOUNT);
5396 if (!kmnt_ids)
5397 return -ENOMEM;
5398
5399 ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
5400 if (!ns)
5401 return -ENOENT;
5402
5403 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
5404 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5405 return -ENOENT;
5406
5407 scoped_guard(rwsem_read, &namespace_sem)
5408 ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
5409 nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
5410 if (ret <= 0)
5411 return ret;
5412
5413 if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
5414 return -EFAULT;
5415
5416 return ret;
5417 }
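/*
 * Illustrative userspace sketch for listmount(2) (error handling
 * omitted): enumerate all mount IDs below the namespace root, resuming
 * from the last ID returned whenever the array fills up.
 *
 *	uint64_t ids[256];
 *	struct mnt_id_req req = {
 *		.size = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = LSMT_ROOT,	// list the whole namespace
 *	};
 *	ssize_t n;
 *	while ((n = listmount(&req, ids, 256, 0)) > 0) {
 *		for (ssize_t i = 0; i < n; i++)
 *			printf("%llu\n", (unsigned long long)ids[i]);
 *		req.param = ids[n - 1];	// resume after the last ID seen
 *	}
 */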
5418
5419 static void __init init_mount_tree(void)
5420 {
5421 struct vfsmount *mnt;
5422 struct mount *m;
5423 struct mnt_namespace *ns;
5424 struct path root;
5425
5426 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
5427 if (IS_ERR(mnt))
5428 panic("Can't create rootfs");
5429
5430 ns = alloc_mnt_ns(&init_user_ns, false);
5431 if (IS_ERR(ns))
5432 panic("Can't allocate initial namespace");
5433 m = real_mount(mnt);
5434 ns->root = m;
5435 ns->nr_mounts = 1;
5436 mnt_add_to_ns(ns, m);
5437 init_task.nsproxy->mnt_ns = ns;
5438 get_mnt_ns(ns);
5439
5440 root.mnt = mnt;
5441 root.dentry = mnt->mnt_root;
5442 mnt->mnt_flags |= MNT_LOCKED;
5443
5444 set_fs_pwd(current->fs, &root);
5445 set_fs_root(current->fs, &root);
5446
5447 mnt_ns_tree_add(ns);
5448 }
5449
5450 void __init mnt_init(void)
5451 {
5452 int err;
5453
5454 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
5455 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
5456
5457 mount_hashtable = alloc_large_system_hash("Mount-cache",
5458 sizeof(struct hlist_head),
5459 mhash_entries, 19,
5460 HASH_ZERO,
5461 &m_hash_shift, &m_hash_mask, 0, 0);
5462 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
5463 sizeof(struct hlist_head),
5464 mphash_entries, 19,
5465 HASH_ZERO,
5466 &mp_hash_shift, &mp_hash_mask, 0, 0);
5467
5468 if (!mount_hashtable || !mountpoint_hashtable)
5469 panic("Failed to allocate mount hash table\n");
5470
5471 kernfs_init();
5472
5473 err = sysfs_init();
5474 if (err)
5475 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
5476 __func__, err);
5477 fs_kobj = kobject_create_and_add("fs", NULL);
5478 if (!fs_kobj)
5479 printk(KERN_WARNING "%s: kobj create error\n", __func__);
5480 shmem_init();
5481 init_rootfs();
5482 init_mount_tree();
5483 }
5484
5485 void put_mnt_ns(struct mnt_namespace *ns)
5486 {
5487 if (!refcount_dec_and_test(&ns->ns.count))
5488 return;
5489 drop_collected_mounts(&ns->root->mnt);
5490 free_mnt_ns(ns);
5491 }
5492
5493 struct vfsmount *kern_mount(struct file_system_type *type)
5494 {
5495 struct vfsmount *mnt;
5496 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
5497 if (!IS_ERR(mnt)) {
5498 /*
5499 * it is a long-term mount; don't release mnt until
5500 * we unmount it, before the filesystem is unregistered
5501 */
5502 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
5503 }
5504 return mnt;
5505 }
5506 EXPORT_SYMBOL_GPL(kern_mount);
5507
5508 void kern_unmount(struct vfsmount *mnt)
5509 {
5510 /* release long term mount so mount point can be released */
5511 if (!IS_ERR(mnt)) {
5512 mnt_make_shortterm(mnt);
5513 synchronize_rcu(); /* yecchhh... */
5514 mntput(mnt);
5515 }
5516 }
5517 EXPORT_SYMBOL(kern_unmount);
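/*
 * Illustrative in-kernel sketch of the pairing above: a subsystem pins an
 * internal filesystem instance for its whole lifetime and tears it down
 * on exit. example_fs_type and the example_* functions are hypothetical.
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount(&example_fs_type);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		kern_unmount(example_mnt);
 *	}
 */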
5518
5519 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
5520 {
5521 unsigned int i;
5522
5523 for (i = 0; i < num; i++)
5524 mnt_make_shortterm(mnt[i]);
5525 synchronize_rcu_expedited();
5526 for (i = 0; i < num; i++)
5527 mntput(mnt[i]);
5528 }
5529 EXPORT_SYMBOL(kern_unmount_array);
5530
5531 bool our_mnt(struct vfsmount *mnt)
5532 {
5533 return check_mnt(real_mount(mnt));
5534 }
5535
5536 bool current_chrooted(void)
5537 {
5538 /* Does the current process have a non-standard root */
5539 struct path ns_root;
5540 struct path fs_root;
5541 bool chrooted;
5542
5543 /* Find the namespace root */
5544 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
5545 ns_root.dentry = ns_root.mnt->mnt_root;
5546 path_get(&ns_root);
5547 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
5548 ;
5549
5550 get_fs_root(current->fs, &fs_root);
5551
5552 chrooted = !path_equal(&fs_root, &ns_root);
5553
5554 path_put(&fs_root);
5555 path_put(&ns_root);
5556
5557 return chrooted;
5558 }
5559
5560 static bool mnt_already_visible(struct mnt_namespace *ns,
5561 const struct super_block *sb,
5562 int *new_mnt_flags)
5563 {
5564 int new_flags = *new_mnt_flags;
5565 struct mount *mnt, *n;
5566 bool visible = false;
5567
5568 down_read(&namespace_sem);
5569 rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
5570 struct mount *child;
5571 int mnt_flags;
5572
5573 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
5574 continue;
5575
5576 /* This mount is not fully visible if its root directory
5577 * is not the root directory of the filesystem.
5578 */
5579 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
5580 continue;
5581
5582 /* A local view of the mount flags */
5583 mnt_flags = mnt->mnt.mnt_flags;
5584
5585 /* Don't miss readonly hidden in the superblock flags */
5586 if (sb_rdonly(mnt->mnt.mnt_sb))
5587 mnt_flags |= MNT_LOCK_READONLY;
5588
5589 /* Verify the mount flags are equal to or more permissive
5590 * than the proposed new mount.
5591 */
5592 if ((mnt_flags & MNT_LOCK_READONLY) &&
5593 !(new_flags & MNT_READONLY))
5594 continue;
5595 if ((mnt_flags & MNT_LOCK_ATIME) &&
5596 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
5597 continue;
5598
5599 /* This mount is not fully visible if there are any
5600 * locked child mounts that cover anything except for
5601 * empty directories.
5602 */
5603 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
5604 struct inode *inode = child->mnt_mountpoint->d_inode;
5605 /* Only worry about locked mounts */
5606 if (!(child->mnt.mnt_flags & MNT_LOCKED))
5607 continue;
5608 /* Is the directory permanently empty? */
5609 if (!is_empty_dir_inode(inode))
5610 goto next;
5611 }
5612 /* Preserve the locked attributes */
5613 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
5614 MNT_LOCK_ATIME);
5615 visible = true;
5616 goto found;
5617 next: ;
5618 }
5619 found:
5620 up_read(&namespace_sem);
5621 return visible;
5622 }
5623
5624 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
5625 {
5626 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
5627 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
5628 unsigned long s_iflags;
5629
5630 if (ns->user_ns == &init_user_ns)
5631 return false;
5632
5633 /* Can this filesystem be too revealing? */
5634 s_iflags = sb->s_iflags;
5635 if (!(s_iflags & SB_I_USERNS_VISIBLE))
5636 return false;
5637
5638 if ((s_iflags & required_iflags) != required_iflags) {
5639 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
5640 required_iflags);
5641 return true;
5642 }
5643
5644 return !mnt_already_visible(ns, sb, new_mnt_flags);
5645 }
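
/*
 * Illustrative userspace sketch of the effect above: in a new user, mount
 * and pid namespace, mounting a fresh proc instance succeeds only if the
 * existing proc mount is fully visible per mnt_already_visible(). If the
 * parent namespace over-mounted and locked e.g. /proc/kcore, the new
 * mount is rejected with EPERM as too revealing.
 */
#if 0
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWPID))
		return 1;
	pid_t pid = fork();	/* the child runs in the new pid namespace */
	if (pid == 0) {
		if (mount("proc", "/proc", "proc", 0, NULL) == -1 &&
		    errno == EPERM)
			printf("proc mount rejected as too revealing\n");
		return 0;
	}
	waitpid(pid, NULL, 0);
	return 0;
}
#endif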
5646
5647 bool mnt_may_suid(struct vfsmount *mnt)
5648 {
5649 /*
5650 * Foreign mounts (accessed via fchdir or through /proc
5651 * symlinks) are always treated as if they are nosuid. This
5652 * prevents namespaces from trusting potentially unsafe
5653 * suid/sgid bits, file caps, or security labels that originate
5654 * in other namespaces.
5655 */
5656 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
5657 current_in_userns(mnt->mnt_sb->s_user_ns);
5658 }
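
/*
 * Illustrative sketch: a path resolved through another task's mount
 * namespace, e.g. via the /proc/<pid>/root magic symlink, fails the
 * check_mnt() test above, so set-user-ID bits on anything executed
 * through it are ignored. The pid is hypothetical, and reaching the
 * target's root requires ptrace access to it.
 */
#if 0
#include <unistd.h>

int main(void)
{
	/* runs the binary, but with nosuid semantics: no euid change */
	execl("/proc/1234/root/usr/bin/passwd", "passwd", (char *)NULL);
	return 1;
}
#endif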
5659
5660 static struct ns_common *mntns_get(struct task_struct *task)
5661 {
5662 struct ns_common *ns = NULL;
5663 struct nsproxy *nsproxy;
5664
5665 task_lock(task);
5666 nsproxy = task->nsproxy;
5667 if (nsproxy) {
5668 ns = &nsproxy->mnt_ns->ns;
5669 get_mnt_ns(to_mnt_ns(ns));
5670 }
5671 task_unlock(task);
5672
5673 return ns;
5674 }
5675
5676 static void mntns_put(struct ns_common *ns)
5677 {
5678 put_mnt_ns(to_mnt_ns(ns));
5679 }
5680
5681 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
5682 {
5683 struct nsproxy *nsproxy = nsset->nsproxy;
5684 struct fs_struct *fs = nsset->fs;
5685 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
5686 struct user_namespace *user_ns = nsset->cred->user_ns;
5687 struct path root;
5688 int err;
5689
5690 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
5691 !ns_capable(user_ns, CAP_SYS_CHROOT) ||
5692 !ns_capable(user_ns, CAP_SYS_ADMIN))
5693 return -EPERM;
5694
5695 if (is_anon_ns(mnt_ns))
5696 return -EINVAL;
5697
5698 if (fs->users != 1)
5699 return -EINVAL;
5700
5701 get_mnt_ns(mnt_ns);
5702 old_mnt_ns = nsproxy->mnt_ns;
5703 nsproxy->mnt_ns = mnt_ns;
5704
5705 /* Find the root */
5706 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
5707 "/", LOOKUP_DOWN, &root);
5708 if (err) {
5709 /* revert to old namespace */
5710 nsproxy->mnt_ns = old_mnt_ns;
5711 put_mnt_ns(mnt_ns);
5712 return err;
5713 }
5714
5715 put_mnt_ns(old_mnt_ns);
5716
5717 /* Update the pwd and root */
5718 set_fs_pwd(fs, &root);
5719 set_fs_root(fs, &root);
5720
5721 path_put(&root);
5722 return 0;
5723 }
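
/*
 * Illustrative userspace sketch of the checks above: setns(2) with
 * CLONE_NEWNS lands in mntns_install(). It fails with EPERM without
 * CAP_SYS_ADMIN over the target namespace (plus CAP_SYS_CHROOT and
 * CAP_SYS_ADMIN in the caller's user namespace), and with EINVAL if the
 * caller shares its fs_struct, e.g. with a CLONE_FS thread. The pid
 * below is hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1234/ns/mnt", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	if (setns(fd, CLONE_NEWNS))	/* also resets root and cwd */
		perror("setns");
	close(fd);
	return 0;
}
#endif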
5724
5725 static struct user_namespace *mntns_owner(struct ns_common *ns)
5726 {
5727 return to_mnt_ns(ns)->user_ns;
5728 }
5729
5730 const struct proc_ns_operations mntns_operations = {
5731 .name = "mnt",
5732 .type = CLONE_NEWNS,
5733 .get = mntns_get,
5734 .put = mntns_put,
5735 .install = mntns_install,
5736 .owner = mntns_owner,
5737 };
5738
5739 #ifdef CONFIG_SYSCTL
5740 static struct ctl_table fs_namespace_sysctls[] = {
5741 {
5742 .procname = "mount-max",
5743 .data = &sysctl_mount_max,
5744 .maxlen = sizeof(unsigned int),
5745 .mode = 0644,
5746 .proc_handler = proc_dointvec_minmax,
5747 .extra1 = SYSCTL_ONE,
5748 },
5749 };
5750
5751 static int __init init_fs_namespace_sysctls(void)
5752 {
5753 register_sysctl_init("fs", fs_namespace_sysctls);
5754 return 0;
5755 }
5756 fs_initcall(init_fs_namespace_sysctls);
5757
5758 #endif /* CONFIG_SYSCTL */
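
/*
 * Illustrative sketch: the table above is registered under "fs", so the
 * per-namespace mount limit appears as /proc/sys/fs/mount-max (i.e.
 * sysctl fs.mount-max), with SYSCTL_ONE as the smallest accepted value.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int max;
	FILE *f = fopen("/proc/sys/fs/mount-max", "r");

	if (f && fscanf(f, "%u", &max) == 1)
		printf("fs.mount-max = %u\n", max);
	if (f)
		fclose(f);
	return 0;
}
#endif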
5759