xref: /linux/security/landlock/fs.c (revision 88da52cc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Landlock LSM - Filesystem management and hooks
4  *
5  * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
6  * Copyright © 2018-2020 ANSSI
7  * Copyright © 2021-2022 Microsoft Corporation
8  * Copyright © 2022 Günther Noack <gnoack3000@gmail.com>
9  * Copyright © 2023-2024 Google LLC
10  */
11 
12 #include <asm/ioctls.h>
13 #include <kunit/test.h>
14 #include <linux/atomic.h>
15 #include <linux/bitops.h>
16 #include <linux/bits.h>
17 #include <linux/compiler_types.h>
18 #include <linux/dcache.h>
19 #include <linux/err.h>
20 #include <linux/falloc.h>
21 #include <linux/fs.h>
22 #include <linux/init.h>
23 #include <linux/kernel.h>
24 #include <linux/limits.h>
25 #include <linux/list.h>
26 #include <linux/lsm_hooks.h>
27 #include <linux/mount.h>
28 #include <linux/namei.h>
29 #include <linux/path.h>
30 #include <linux/rcupdate.h>
31 #include <linux/spinlock.h>
32 #include <linux/stat.h>
33 #include <linux/types.h>
34 #include <linux/wait_bit.h>
35 #include <linux/workqueue.h>
36 #include <uapi/linux/fiemap.h>
37 #include <uapi/linux/landlock.h>
38 
39 #include "common.h"
40 #include "cred.h"
41 #include "fs.h"
42 #include "limits.h"
43 #include "object.h"
44 #include "ruleset.h"
45 #include "setup.h"
46 
47 /* Underlying object management */
48 
49 static void release_inode(struct landlock_object *const object)
50 	__releases(object->lock)
51 {
52 	struct inode *const inode = object->underobj;
53 	struct super_block *sb;
54 
55 	if (!inode) {
56 		spin_unlock(&object->lock);
57 		return;
58 	}
59 
60 	/*
61 	 * Protects against concurrent use by hook_sb_delete() of the reference
62 	 * to the underlying inode.
63 	 */
64 	object->underobj = NULL;
65 	/*
66 	 * Makes sure that if the filesystem is concurrently unmounted,
67 	 * hook_sb_delete() will wait for us to finish iput().
68 	 */
69 	sb = inode->i_sb;
70 	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
71 	spin_unlock(&object->lock);
72 	/*
73 	 * Because object->underobj was not NULL, hook_sb_delete() and
74 	 * get_inode_object() guarantee that it is safe to reset
75 	 * landlock_inode(inode)->object while it is not NULL.  It is therefore
76 	 * not necessary to lock inode->i_lock.
77 	 */
78 	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
79 	/*
80 	 * Now, new rules can safely be tied to @inode with get_inode_object().
81 	 */
82 
83 	iput(inode);
84 	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
85 		wake_up_var(&landlock_superblock(sb)->inode_refs);
86 }
87 
88 static const struct landlock_object_underops landlock_fs_underops = {
89 	.release = release_inode
90 };
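
/*
 * Note: the .release handler above is invoked by landlock_put_object() (cf.
 * object.c) with object->lock held, once the last ruleset reference to the
 * object has been dropped.  This is why release_inode() is annotated with
 * __releases(object->lock) and must unlock it on every path.
 */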
91 
92 /* IOCTL helpers */
93 
94 /**
95  * is_masked_device_ioctl - Determine whether an IOCTL command is always
96  * permitted with Landlock for device files.  These commands can not be
97  * restricted on device files by enforcing a Landlock policy.
98  *
99  * @cmd: The IOCTL command that is supposed to be run.
100  *
101  * By default, any IOCTL on a device file requires the
102  * LANDLOCK_ACCESS_FS_IOCTL_DEV right.  However, we blanket-permit some
103  * commands, if:
104  *
105  * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
106  *    not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
107  *
108  * 2. The command is harmless when invoked on devices.
109  *
110  * We also permit commands that do not make sense for devices, but where the
111  * do_vfs_ioctl() implementation returns a more conventional error code.
112  *
113  * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
114  * should be considered for inclusion here.
115  *
116  * Returns: true if the IOCTL @cmd can not be restricted with Landlock for
117  * device files.
118  */
119 static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
120 {
121 	switch (cmd) {
122 	/*
123 	 * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
124 	 * close-on-exec and the file's buffered-IO and async flags.  These
125 	 * operations are also available through fcntl(2), and are
126 	 * unconditionally permitted in Landlock.
127 	 */
128 	case FIOCLEX:
129 	case FIONCLEX:
130 	case FIONBIO:
131 	case FIOASYNC:
132 	/*
133 	 * FIOQSIZE queries the size of a regular file, directory, or link.
134 	 *
135 	 * We still permit it, because it always returns -ENOTTY for
136 	 * other file types.
137 	 */
138 	case FIOQSIZE:
139 	/*
140 	 * FIFREEZE and FITHAW freeze and thaw the file system which the
141 	 * given file belongs to.  Requires CAP_SYS_ADMIN.
142 	 *
143 	 * These commands operate on the file system's superblock rather
144 	 * than on the file itself.  The same operations can also be
145 	 * done through any other file or directory on the same file
146 	 * system, so it is safe to permit these.
147 	 */
148 	case FIFREEZE:
149 	case FITHAW:
150 	/*
151 	 * FS_IOC_FIEMAP queries information about the allocation of
152 	 * blocks within a file.
153 	 *
154 	 * This IOCTL command only makes sense for regular files and is
155 	 * not implemented by devices. It is harmless to permit.
156 	 */
157 	case FS_IOC_FIEMAP:
158 	/*
159 	 * FIGETBSZ queries the file system's block size for a file or
160 	 * directory.
161 	 *
162 	 * This command operates on the file system's superblock rather
163 	 * than on the file itself.  The same operation can also be done
164 	 * through any other file or directory on the same file system,
165 	 * so it is safe to permit it.
166 	 */
167 	case FIGETBSZ:
168 	/*
169 	 * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
170 	 * their underlying storage ("reflink") between source and
171 	 * destination FDs, on file systems which support that.
172 	 *
173 	 * These IOCTL commands only apply to regular files
174 	 * and are harmless to permit for device files.
175 	 */
176 	case FICLONE:
177 	case FICLONERANGE:
178 	case FIDEDUPERANGE:
179 	/*
180 	 * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
181 	 * the file system superblock, not on the specific file, so
182 	 * these operations are available through any other file on the
183 	 * same file system as well.
184 	 */
185 	case FS_IOC_GETFSUUID:
186 	case FS_IOC_GETFSSYSFSPATH:
187 		return true;
188 
189 	/*
190 	 * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
191 	 * FS_IOC_FSSETXATTR are forwarded to device implementations.
192 	 */
193 
194 	/*
195 	 * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
196 	 * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
197 	 * forwarded to device implementations, so not permitted.
198 	 */
199 
200 	/* Other commands are guarded by the access right. */
201 	default:
202 		return false;
203 	}
204 }
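
/*
 * As a concrete illustration of the above: FIOCLEX or FIGETBSZ on a device
 * file are never denied by Landlock, whereas FIONREAD on the same file is
 * forwarded to the device driver and therefore requires the
 * LANDLOCK_ACCESS_FS_IOCTL_DEV right.
 */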
205 
206 /*
207  * is_masked_device_ioctl_compat - same as the helper above, but checking the
208  * "compat" IOCTL commands.
209  *
210  * The IOCTL commands with special handling in compat-mode should behave the
211  * same as their non-compat counterparts.
212  */
213 static __attribute_const__ bool
214 is_masked_device_ioctl_compat(const unsigned int cmd)
215 {
216 	switch (cmd) {
217 	/* FICLONE is permitted, same as in the non-compat variant. */
218 	case FICLONE:
219 		return true;
220 
221 #if defined(CONFIG_X86_64)
222 	/*
223 	 * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
224 	 * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
225 	 * for consistency with their non-compat variants.
226 	 */
227 	case FS_IOC_RESVSP_32:
228 	case FS_IOC_RESVSP64_32:
229 	case FS_IOC_UNRESVSP_32:
230 	case FS_IOC_UNRESVSP64_32:
231 	case FS_IOC_ZERO_RANGE_32:
232 #endif
233 
234 	/*
235 	 * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
236 	 * implementations.
237 	 */
238 	case FS_IOC32_GETFLAGS:
239 	case FS_IOC32_SETFLAGS:
240 		return false;
241 	default:
242 		return is_masked_device_ioctl(cmd);
243 	}
244 }
245 
246 /* Ruleset management */
247 
248 static struct landlock_object *get_inode_object(struct inode *const inode)
249 {
250 	struct landlock_object *object, *new_object;
251 	struct landlock_inode_security *inode_sec = landlock_inode(inode);
252 
253 	rcu_read_lock();
254 retry:
255 	object = rcu_dereference(inode_sec->object);
256 	if (object) {
257 		if (likely(refcount_inc_not_zero(&object->usage))) {
258 			rcu_read_unlock();
259 			return object;
260 		}
261 		/*
262 		 * We are racing with release_inode(), the object is going
263 		 * away.  Wait for release_inode(), then retry.
264 		 */
265 		spin_lock(&object->lock);
266 		spin_unlock(&object->lock);
267 		goto retry;
268 	}
269 	rcu_read_unlock();
270 
271 	/*
272 	 * If there is no object tied to @inode, then create a new one (without
273 	 * holding any locks).
274 	 */
275 	new_object = landlock_create_object(&landlock_fs_underops, inode);
276 	if (IS_ERR(new_object))
277 		return new_object;
278 
279 	/*
280 	 * Protects against concurrent calls to get_inode_object() or
281 	 * hook_sb_delete().
282 	 */
283 	spin_lock(&inode->i_lock);
284 	if (unlikely(rcu_access_pointer(inode_sec->object))) {
285 		/* Someone else just created the object, bail out and retry. */
286 		spin_unlock(&inode->i_lock);
287 		kfree(new_object);
288 
289 		rcu_read_lock();
290 		goto retry;
291 	}
292 
293 	/*
294 	 * @inode will be released by hook_sb_delete() on its superblock
295 	 * shutdown, or by release_inode() when no more ruleset references the
296 	 * related object.
297 	 */
298 	ihold(inode);
299 	rcu_assign_pointer(inode_sec->object, new_object);
300 	spin_unlock(&inode->i_lock);
301 	return new_object;
302 }
303 
304 /* All access rights that can be tied to files. */
305 /* clang-format off */
306 #define ACCESS_FILE ( \
307 	LANDLOCK_ACCESS_FS_EXECUTE | \
308 	LANDLOCK_ACCESS_FS_WRITE_FILE | \
309 	LANDLOCK_ACCESS_FS_READ_FILE | \
310 	LANDLOCK_ACCESS_FS_TRUNCATE | \
311 	LANDLOCK_ACCESS_FS_IOCTL_DEV)
312 /* clang-format on */
313 
314 /*
315  * @path: Should have been checked by get_path_from_fd().
316  */
317 int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
318 			    const struct path *const path,
319 			    access_mask_t access_rights)
320 {
321 	int err;
322 	struct landlock_id id = {
323 		.type = LANDLOCK_KEY_INODE,
324 	};
325 
326 	/* Files only get access rights that make sense. */
327 	if (!d_is_dir(path->dentry) &&
328 	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
329 		return -EINVAL;
330 	if (WARN_ON_ONCE(ruleset->num_layers != 1))
331 		return -EINVAL;
332 
333 	/* Transforms relative access rights to absolute ones. */
334 	access_rights |= LANDLOCK_MASK_ACCESS_FS &
335 			 ~landlock_get_fs_access_mask(ruleset, 0);
336 	id.key.object = get_inode_object(d_backing_inode(path->dentry));
337 	if (IS_ERR(id.key.object))
338 		return PTR_ERR(id.key.object);
339 	mutex_lock(&ruleset->lock);
340 	err = landlock_insert_rule(ruleset, id, access_rights);
341 	mutex_unlock(&ruleset->lock);
342 	/*
343 	 * No need to check for an error because landlock_insert_rule()
344 	 * increments the refcount for the new object if needed.
345 	 */
346 	landlock_put_object(id.key.object);
347 	return err;
348 }
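
/*
 * For context, a minimal user-space sketch (not part of this file) of how a
 * rule ends up in landlock_append_fs_rule() above; the ruleset_fd, the path
 * and the chosen access rights are purely illustrative:
 *
 *	struct landlock_path_beneath_attr path_beneath = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_TRUNCATE,
 *	};
 *	path_beneath.parent_fd = open("/etc/hostname", O_PATH | O_CLOEXEC);
 *	syscall(__NR_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
 *
 * The landlock_add_rule(2) handler resolves parent_fd to a struct path and
 * then calls landlock_append_fs_rule() with the requested access mask.
 */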
349 
350 /* Access-control management */
351 
352 /*
353  * The lifetime of the returned rule is tied to @domain.
354  *
355  * Returns NULL if no rule is found or if @dentry is negative.
356  */
357 static const struct landlock_rule *
358 find_rule(const struct landlock_ruleset *const domain,
359 	  const struct dentry *const dentry)
360 {
361 	const struct landlock_rule *rule;
362 	const struct inode *inode;
363 	struct landlock_id id = {
364 		.type = LANDLOCK_KEY_INODE,
365 	};
366 
367 	/* Ignores nonexistent leaves. */
368 	if (d_is_negative(dentry))
369 		return NULL;
370 
371 	inode = d_backing_inode(dentry);
372 	rcu_read_lock();
373 	id.key.object = rcu_dereference(landlock_inode(inode)->object);
374 	rule = landlock_find_rule(domain, id);
375 	rcu_read_unlock();
376 	return rule;
377 }
378 
379 /*
380  * Allows access to pseudo filesystems that will never be mountable (e.g.
381  * sockfs, pipefs), but can still be reachable through
382  * /proc/<pid>/fd/<file-descriptor>
383  */
384 static bool is_nouser_or_private(const struct dentry *dentry)
385 {
386 	return (dentry->d_sb->s_flags & SB_NOUSER) ||
387 	       (d_is_positive(dentry) &&
388 		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
389 }
390 
391 static access_mask_t
392 get_raw_handled_fs_accesses(const struct landlock_ruleset *const domain)
393 {
394 	access_mask_t access_dom = 0;
395 	size_t layer_level;
396 
397 	for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
398 		access_dom |=
399 			landlock_get_raw_fs_access_mask(domain, layer_level);
400 	return access_dom;
401 }
402 
403 static access_mask_t
404 get_handled_fs_accesses(const struct landlock_ruleset *const domain)
405 {
406 	/* Also handles access rights that are initially denied by default. */
407 	return get_raw_handled_fs_accesses(domain) |
408 	       LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
409 }
410 
411 static const struct landlock_ruleset *
412 get_fs_domain(const struct landlock_ruleset *const domain)
413 {
414 	if (!domain || !get_raw_handled_fs_accesses(domain))
415 		return NULL;
416 
417 	return domain;
418 }
419 
420 static const struct landlock_ruleset *get_current_fs_domain(void)
421 {
422 	return get_fs_domain(landlock_get_current_domain());
423 }
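
/*
 * The two helpers above return NULL when the task has no domain or when its
 * domain does not handle any filesystem access right, which lets the
 * following hooks grant access early without walking any path.
 */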
424 
425 /*
426  * Check that a destination file hierarchy has more restrictions than a source
427  * file hierarchy.  This is only used for link and rename actions.
428  *
429  * @layer_masks_child2: Optional child masks.
430  */
431 static bool no_more_access(
432 	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
433 	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
434 	const bool child1_is_directory,
435 	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
436 	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
437 	const bool child2_is_directory)
438 {
439 	unsigned long access_bit;
440 
441 	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
442 	     access_bit++) {
443 		/* Ignores accesses that only make sense for directories. */
444 		const bool is_file_access =
445 			!!(BIT_ULL(access_bit) & ACCESS_FILE);
446 
447 		if (child1_is_directory || is_file_access) {
448 			/*
449 			 * Checks if the destination restrictions are a
450 			 * superset of the source ones (i.e. inherited access
451 			 * rights without child exceptions):
452 			 * restrictions(parent2) >= restrictions(child1)
453 			 */
454 			if ((((*layer_masks_parent1)[access_bit] &
455 			      (*layer_masks_child1)[access_bit]) |
456 			     (*layer_masks_parent2)[access_bit]) !=
457 			    (*layer_masks_parent2)[access_bit])
458 				return false;
459 		}
460 
461 		if (!layer_masks_child2)
462 			continue;
463 		if (child2_is_directory || is_file_access) {
464 			/*
465 			 * Checks inverted restrictions for RENAME_EXCHANGE:
466 			 * restrictions(parent1) >= restrictions(child2)
467 			 */
468 			if ((((*layer_masks_parent2)[access_bit] &
469 			      (*layer_masks_child2)[access_bit]) |
470 			     (*layer_masks_parent1)[access_bit]) !=
471 			    (*layer_masks_parent1)[access_bit])
472 				return false;
473 		}
474 	}
475 	return true;
476 }
477 
478 #define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
479 #define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))
480 
481 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
482 
483 static void test_no_more_access(struct kunit *const test)
484 {
485 	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
486 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
487 		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
488 	};
489 	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
490 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
491 		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
492 	};
493 	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
494 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
495 	};
496 	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
497 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
498 	};
499 	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
500 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
501 							  BIT_ULL(1),
502 	};
503 	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};
504 
505 	/* Checks without restriction. */
506 	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
507 	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
508 	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);
509 
510 	/*
511 	 * Checks that we can only refer a file if no more access could be
512 	 * inherited.
513 	 */
514 	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
515 	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
516 	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
517 	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);
518 
519 	/* Checks allowed referring with different nested domains. */
520 	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
521 	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
522 	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
523 	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
524 	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
525 	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
526 	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);
527 
528 	/* Checks that file access rights are also enforced for a directory. */
529 	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);
530 
531 	/* Checks that directory access rights don't impact file referring... */
532 	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
533 	/* ...but only directory referring. */
534 	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);
535 
536 	/* Checks directory exchange. */
537 	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
538 	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
539 	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
540 	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
541 	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);
542 
543 	/* Checks file exchange with directory access rights... */
544 	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
545 	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
546 	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
547 	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
548 	/* ...and with file access rights. */
549 	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
550 	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
551 	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
552 	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
553 	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);
554 
555 	/*
556 	 * Allowing the following requests should not be a security risk
557 	 * because domain 0 denies execute access, and domain 1 is always
558 	 * nested with domain 0.  However, adding an exception for this case
559 	 * would mean checking all nested domains to make sure none can get
560 	 * more privileges (e.g. processes only sandboxed by domain 0).
561 	 * Moreover, this behavior (i.e. composition of N domains) could then
562 	 * be inconsistent compared to domain 1's ruleset alone (e.g. a
563 	 * link/rename might be denied with domain 1's ruleset, whereas it would
564 	 * be allowed if nested on top of domain 0).  Another drawback would be
565 	 * to create a covert channel that could enable sandboxed processes to
566 	 * infer most of the filesystem restrictions from their domain.  To
567 	 * make it simple, efficient, safe, and more consistent, this case is
568 	 * always denied.
569 	 */
570 	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
571 	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
572 	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
573 	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);
574 
575 	/* Checks the same case of exclusive domains with a file... */
576 	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
577 	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
578 	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
579 	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
580 	/* ...and with a directory. */
581 	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
582 	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
583 	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
584 }
585 
586 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
587 
588 #undef NMA_TRUE
589 #undef NMA_FALSE
590 
591 /*
592  * Removes @layer_masks accesses that are not requested.
593  *
594  * Returns true if the request is allowed, false otherwise.
595  */
596 static bool
597 scope_to_request(const access_mask_t access_request,
598 		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
599 {
600 	const unsigned long access_req = access_request;
601 	unsigned long access_bit;
602 
603 	if (WARN_ON_ONCE(!layer_masks))
604 		return true;
605 
606 	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
607 		(*layer_masks)[access_bit] = 0;
608 	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
609 }
610 
611 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
612 
613 static void test_scope_to_request_with_exec_none(struct kunit *const test)
614 {
615 	/* Allows everything. */
616 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
617 
618 	/* Checks and scopes with execute. */
619 	KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
620 						 &layer_masks));
621 	KUNIT_EXPECT_EQ(test, 0,
622 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
623 	KUNIT_EXPECT_EQ(test, 0,
624 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
625 }
626 
627 static void test_scope_to_request_with_exec_some(struct kunit *const test)
628 {
629 	/* Denies execute and write. */
630 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
631 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
632 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
633 	};
634 
635 	/* Checks and scopes with execute. */
636 	KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
637 						  &layer_masks));
638 	KUNIT_EXPECT_EQ(test, BIT_ULL(0),
639 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
640 	KUNIT_EXPECT_EQ(test, 0,
641 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
642 }
643 
644 static void test_scope_to_request_without_access(struct kunit *const test)
645 {
646 	/* Denies execute and write. */
647 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
648 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
649 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
650 	};
651 
652 	/* Checks and scopes without access request. */
653 	KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
654 	KUNIT_EXPECT_EQ(test, 0,
655 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
656 	KUNIT_EXPECT_EQ(test, 0,
657 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
658 }
659 
660 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
661 
662 /*
663  * Returns true if there is at least one access right different than
664  * LANDLOCK_ACCESS_FS_REFER.
665  */
666 static bool
667 is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
668 	  const access_mask_t access_request)
669 {
670 	unsigned long access_bit;
671 	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
672 	const unsigned long access_check = access_request &
673 					   ~LANDLOCK_ACCESS_FS_REFER;
674 
675 	if (!layer_masks)
676 		return false;
677 
678 	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
679 		if ((*layer_masks)[access_bit])
680 			return true;
681 	}
682 	return false;
683 }
684 
685 #define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
686 #define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))
687 
688 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
689 
690 static void test_is_eacces_with_none(struct kunit *const test)
691 {
692 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
693 
694 	IE_FALSE(&layer_masks, 0);
695 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
696 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
697 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
698 }
699 
700 static void test_is_eacces_with_refer(struct kunit *const test)
701 {
702 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
703 		[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
704 	};
705 
706 	IE_FALSE(&layer_masks, 0);
707 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
708 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
709 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
710 }
711 
712 static void test_is_eacces_with_write(struct kunit *const test)
713 {
714 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
715 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
716 	};
717 
718 	IE_FALSE(&layer_masks, 0);
719 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
720 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
721 
722 	IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
723 }
724 
725 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
726 
727 #undef IE_TRUE
728 #undef IE_FALSE
729 
730 /**
731  * is_access_to_paths_allowed - Check accesses for requests with a common path
732  *
733  * @domain: Domain to check against.
734  * @path: File hierarchy to walk through.
735  * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
736  *     equal to @layer_masks_parent2 (if any).  This is tied to the unique
737  *     requested path for most actions, or the source in case of a refer action
738  *     (i.e. rename or link), or the source and destination in case of
739  *     RENAME_EXCHANGE.
740  * @layer_masks_parent1: Pointer to a matrix of layer masks per access
741  *     masks, identifying the layers that forbid a specific access.  Bits from
742  *     this matrix can be unset according to the @path walk.  An empty matrix
743  *     means that @domain allows all possible Landlock accesses (i.e. not only
744  *     those identified by @access_request_parent1).  This matrix can
745  *     initially refer to domain layer masks and, when the accesses for the
746  *     destination and source are the same, to requested layer masks.
747  * @dentry_child1: Dentry to the initial child of the parent1 path.  This
748  *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
749  * @access_request_parent2: Similar to @access_request_parent1 but for a
750  *     request involving a source and a destination.  This refers to the
751  *     destination, except in case of RENAME_EXCHANGE where it also refers to
752  *     the source.  Must be set to 0 when using a simple path request.
753  * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
754  *     action.  This must be NULL otherwise.
755  * @dentry_child2: Dentry to the initial child of the parent2 path.  This
756  *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
757  *     otherwise.
758  *
759  * This helper first checks that the destination has a superset of restrictions
760  * compared to the source (if any) for a common path.  Because of
761  * RENAME_EXCHANGE actions, source and destinations may be swapped.  It then
762  * checks that the collected accesses and the remaining ones are enough to
763  * allow the request.
764  *
765  * Returns:
766  * - true if the access request is granted;
767  * - false otherwise.
768  */
769 static bool is_access_to_paths_allowed(
770 	const struct landlock_ruleset *const domain,
771 	const struct path *const path,
772 	const access_mask_t access_request_parent1,
773 	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
774 	const struct dentry *const dentry_child1,
775 	const access_mask_t access_request_parent2,
776 	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
777 	const struct dentry *const dentry_child2)
778 {
779 	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
780 	     child1_is_directory = true, child2_is_directory = true;
781 	struct path walker_path;
782 	access_mask_t access_masked_parent1, access_masked_parent2;
783 	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
784 		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
785 	layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
786 	(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;
787 
788 	if (!access_request_parent1 && !access_request_parent2)
789 		return true;
790 	if (WARN_ON_ONCE(!domain || !path))
791 		return true;
792 	if (is_nouser_or_private(path->dentry))
793 		return true;
794 	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
795 		return false;
796 
797 	if (unlikely(layer_masks_parent2)) {
798 		if (WARN_ON_ONCE(!dentry_child1))
799 			return false;
800 		/*
801 		 * For a double request, first check for potential privilege
802 		 * escalation by looking at domain handled accesses (which are
803 		 * a superset of the meaningful requested accesses).
804 		 */
805 		access_masked_parent1 = access_masked_parent2 =
806 			get_handled_fs_accesses(domain);
807 		is_dom_check = true;
808 	} else {
809 		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
810 			return false;
811 		/* For a simple request, only check for requested accesses. */
812 		access_masked_parent1 = access_request_parent1;
813 		access_masked_parent2 = access_request_parent2;
814 		is_dom_check = false;
815 	}
816 
817 	if (unlikely(dentry_child1)) {
818 		landlock_unmask_layers(
819 			find_rule(domain, dentry_child1),
820 			landlock_init_layer_masks(
821 				domain, LANDLOCK_MASK_ACCESS_FS,
822 				&_layer_masks_child1, LANDLOCK_KEY_INODE),
823 			&_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
824 		layer_masks_child1 = &_layer_masks_child1;
825 		child1_is_directory = d_is_dir(dentry_child1);
826 	}
827 	if (unlikely(dentry_child2)) {
828 		landlock_unmask_layers(
829 			find_rule(domain, dentry_child2),
830 			landlock_init_layer_masks(
831 				domain, LANDLOCK_MASK_ACCESS_FS,
832 				&_layer_masks_child2, LANDLOCK_KEY_INODE),
833 			&_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
834 		layer_masks_child2 = &_layer_masks_child2;
835 		child2_is_directory = d_is_dir(dentry_child2);
836 	}
837 
838 	walker_path = *path;
839 	path_get(&walker_path);
840 	/*
841 	 * We need to walk through all the hierarchy to not miss any relevant
842 	 * restriction.
843 	 */
844 	while (true) {
845 		struct dentry *parent_dentry;
846 		const struct landlock_rule *rule;
847 
848 		/*
849 		 * If at least all accesses allowed on the destination are
850 		 * already allowed on the source (i.e. there are at least as many
851 		 * restrictions on the destination as on the source), then we can
852 		 * safely refer files from the source to the destination without
853 		 * risking a privilege escalation.
854 		 * This also applies in the case of RENAME_EXCHANGE, which
855 		 * implies checks in both directions.  This is crucial for
856 		 * standalone multilayered security policies.  Furthermore,
857 		 * this helps prevent policy writers from shooting themselves in
858 		 * the foot.
859 		 */
860 		if (unlikely(is_dom_check &&
861 			     no_more_access(
862 				     layer_masks_parent1, layer_masks_child1,
863 				     child1_is_directory, layer_masks_parent2,
864 				     layer_masks_child2,
865 				     child2_is_directory))) {
866 			allowed_parent1 = scope_to_request(
867 				access_request_parent1, layer_masks_parent1);
868 			allowed_parent2 = scope_to_request(
869 				access_request_parent2, layer_masks_parent2);
870 
871 			/* Stops when all accesses are granted. */
872 			if (allowed_parent1 && allowed_parent2)
873 				break;
874 
875 			/*
876 			 * Now, downgrades the remaining checks from domain
877 			 * handled accesses to requested accesses.
878 			 */
879 			is_dom_check = false;
880 			access_masked_parent1 = access_request_parent1;
881 			access_masked_parent2 = access_request_parent2;
882 		}
883 
884 		rule = find_rule(domain, walker_path.dentry);
885 		allowed_parent1 = landlock_unmask_layers(
886 			rule, access_masked_parent1, layer_masks_parent1,
887 			ARRAY_SIZE(*layer_masks_parent1));
888 		allowed_parent2 = landlock_unmask_layers(
889 			rule, access_masked_parent2, layer_masks_parent2,
890 			ARRAY_SIZE(*layer_masks_parent2));
891 
892 		/* Stops when a rule from each layer grants access. */
893 		if (allowed_parent1 && allowed_parent2)
894 			break;
895 jump_up:
896 		if (walker_path.dentry == walker_path.mnt->mnt_root) {
897 			if (follow_up(&walker_path)) {
898 				/* Ignores hidden mount points. */
899 				goto jump_up;
900 			} else {
901 				/*
902 				 * Stops at the real root.  Denies access
903 				 * because not all layers have granted access.
904 				 */
905 				break;
906 			}
907 		}
908 		if (unlikely(IS_ROOT(walker_path.dentry))) {
909 			/*
910 			 * Stops at disconnected root directories.  Only allows
911 			 * access to internal filesystems (e.g. nsfs, which is
912 			 * reachable through /proc/<pid>/ns/<namespace>).
913 			 */
914 			allowed_parent1 = allowed_parent2 =
915 				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
916 			break;
917 		}
918 		parent_dentry = dget_parent(walker_path.dentry);
919 		dput(walker_path.dentry);
920 		walker_path.dentry = parent_dentry;
921 	}
922 	path_put(&walker_path);
923 
924 	return allowed_parent1 && allowed_parent2;
925 }
926 
927 static int check_access_path(const struct landlock_ruleset *const domain,
928 			     const struct path *const path,
929 			     access_mask_t access_request)
930 {
931 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
932 
933 	access_request = landlock_init_layer_masks(
934 		domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
935 	if (is_access_to_paths_allowed(domain, path, access_request,
936 				       &layer_masks, NULL, 0, NULL, NULL))
937 		return 0;
938 	return -EACCES;
939 }
940 
941 static int current_check_access_path(const struct path *const path,
942 				     const access_mask_t access_request)
943 {
944 	const struct landlock_ruleset *const dom = get_current_fs_domain();
945 
946 	if (!dom)
947 		return 0;
948 	return check_access_path(dom, path, access_request);
949 }
950 
951 static access_mask_t get_mode_access(const umode_t mode)
952 {
953 	switch (mode & S_IFMT) {
954 	case S_IFLNK:
955 		return LANDLOCK_ACCESS_FS_MAKE_SYM;
956 	case 0:
957 		/* A zero mode translates to S_IFREG. */
958 	case S_IFREG:
959 		return LANDLOCK_ACCESS_FS_MAKE_REG;
960 	case S_IFDIR:
961 		return LANDLOCK_ACCESS_FS_MAKE_DIR;
962 	case S_IFCHR:
963 		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
964 	case S_IFBLK:
965 		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
966 	case S_IFIFO:
967 		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
968 	case S_IFSOCK:
969 		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
970 	default:
971 		WARN_ON_ONCE(1);
972 		return 0;
973 	}
974 }
975 
976 static access_mask_t maybe_remove(const struct dentry *const dentry)
977 {
978 	if (d_is_negative(dentry))
979 		return 0;
980 	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
981 				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
982 }
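
/*
 * get_mode_access() and maybe_remove() translate the type of the file being
 * created, removed or replaced into the corresponding LANDLOCK_ACCESS_FS_*
 * rights, which the path hooks below then check against the current domain.
 */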
983 
984 /**
985  * collect_domain_accesses - Walk through a file path and collect accesses
986  *
987  * @domain: Domain to check against.
988  * @mnt_root: Last directory to check.
989  * @dir: Directory to start the walk from.
990  * @layer_masks_dom: Where to store the collected accesses.
991  *
992  * This helper is useful to begin a path walk from the @dir directory to a
993  * @mnt_root directory used as a mount point.  This mount point is the common
994  * ancestor of the source and the destination of a renamed or linked
995  * file.  While walking from @dir to @mnt_root, we record all the domain's
996  * allowed accesses in @layer_masks_dom.
997  *
998  * This is similar to is_access_to_paths_allowed() but much simpler because it
999  * only handles walking on the same mount point and only checks one set of
1000  * accesses.
1001  *
1002  * Returns:
1003  * - true if all the domain access rights are allowed for @dir;
1004  * - false if the walk reached @mnt_root.
1005  */
1006 static bool collect_domain_accesses(
1007 	const struct landlock_ruleset *const domain,
1008 	const struct dentry *const mnt_root, struct dentry *dir,
1009 	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
1010 {
1011 	unsigned long access_dom;
1012 	bool ret = false;
1013 
1014 	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
1015 		return true;
1016 	if (is_nouser_or_private(dir))
1017 		return true;
1018 
1019 	access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
1020 					       layer_masks_dom,
1021 					       LANDLOCK_KEY_INODE);
1022 
1023 	dget(dir);
1024 	while (true) {
1025 		struct dentry *parent_dentry;
1026 
1027 		/* Gets all layers allowing all domain accesses. */
1028 		if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
1029 					   layer_masks_dom,
1030 					   ARRAY_SIZE(*layer_masks_dom))) {
1031 			/*
1032 			 * Stops when all handled accesses are allowed by at
1033 			 * least one rule in each layer.
1034 			 */
1035 			ret = true;
1036 			break;
1037 		}
1038 
1039 		/* We should not reach a root other than @mnt_root. */
1040 		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
1041 			break;
1042 
1043 		parent_dentry = dget_parent(dir);
1044 		dput(dir);
1045 		dir = parent_dentry;
1046 	}
1047 	dput(dir);
1048 	return ret;
1049 }
1050 
1051 /**
1052  * current_check_refer_path - Check if a rename or link action is allowed
1053  *
1054  * @old_dentry: File or directory requested to be moved or linked.
1055  * @new_dir: Destination parent directory.
1056  * @new_dentry: Destination file or directory.
1057  * @removable: Set to true if it is a rename operation.
1058  * @exchange: Set to true if it is a rename operation with RENAME_EXCHANGE.
1059  *
1060  * Because of its unprivileged constraints, Landlock relies on file hierarchies
1061  * (and not only inodes) to tie access rights to files.  Being able to link or
1062  * rename a file hierarchy brings some challenges.  Indeed, moving or linking a
1063  * file (i.e. creating a new reference to an inode) can have an impact on the
1064  * actions allowed for a set of files if it would change its parent directory
1065  * (i.e. reparenting).
1066  *
1067  * To avoid trivial access right bypasses, Landlock first checks if the file or
1068  * directory requested to be moved would gain new access rights inherited from
1069  * its new hierarchy.  Before returning any error, Landlock then checks that
1070  * the parent source hierarchy and the destination hierarchy would allow the
1071  * link or rename action.  If it is not the case, an error with EACCES is
1072  * returned to inform user space that there is no way to remove or create the
1073  * requested source file type.  If it should be allowed but the new inherited
1074  * access rights would be greater than the source access rights, then the
1075  * kernel returns an error with EXDEV.  Prioritizing EACCES over EXDEV enables
1076  * user space to abort the whole operation if there is no way to do it, or to
1077  * manually copy the source to the destination if this remains allowed, e.g.
1078  * because file creation is allowed on the destination directory but not direct
1079  * linking.
1080  *
1081  * To achieve this goal, the kernel needs to compare two file hierarchies: the
1082  * one identifying the source file or directory (including itself), and the
1083  * destination one.  This can be seen as a multilayer partial ordering problem.
1084  * The kernel walks through these paths and collects in a matrix the access
1085  * rights that are denied per layer.  These matrices are then compared to see
1086  * if the destination one has more (or the same) restrictions as the source
1087  * one.  If this is the case, the requested action will not return EXDEV, which
1088  * doesn't mean the action is allowed.  The parent hierarchy of the source
1089  * (i.e. parent directory), and the destination hierarchy must also be checked
1090  * to verify that they explicitly allow such action (i.e. referencing,
1091  * creation and potentially removal rights).  The kernel implementation is then
1092  * required to rely on potentially four matrices of access rights: one for the
1093  * source file or directory (i.e. the child), a potentially other one for the
1094  * other source/destination (in case of RENAME_EXCHANGE), one for the source
1095  * parent hierarchy and a last one for the destination hierarchy.  These
1096  * ephemeral matrices take some space on the stack, which limits the number of
1097  * layers to a deemed reasonable number: 16.
1098  *
1099  * Returns:
1100  * - 0 if access is allowed;
1101  * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
1102  * - -EACCES if file removal or creation is denied.
1103  */
1104 static int current_check_refer_path(struct dentry *const old_dentry,
1105 				    const struct path *const new_dir,
1106 				    struct dentry *const new_dentry,
1107 				    const bool removable, const bool exchange)
1108 {
1109 	const struct landlock_ruleset *const dom = get_current_fs_domain();
1110 	bool allow_parent1, allow_parent2;
1111 	access_mask_t access_request_parent1, access_request_parent2;
1112 	struct path mnt_dir;
1113 	struct dentry *old_parent;
1114 	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
1115 		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
1116 
1117 	if (!dom)
1118 		return 0;
1119 	if (WARN_ON_ONCE(dom->num_layers < 1))
1120 		return -EACCES;
1121 	if (unlikely(d_is_negative(old_dentry)))
1122 		return -ENOENT;
1123 	if (exchange) {
1124 		if (unlikely(d_is_negative(new_dentry)))
1125 			return -ENOENT;
1126 		access_request_parent1 =
1127 			get_mode_access(d_backing_inode(new_dentry)->i_mode);
1128 	} else {
1129 		access_request_parent1 = 0;
1130 	}
1131 	access_request_parent2 =
1132 		get_mode_access(d_backing_inode(old_dentry)->i_mode);
1133 	if (removable) {
1134 		access_request_parent1 |= maybe_remove(old_dentry);
1135 		access_request_parent2 |= maybe_remove(new_dentry);
1136 	}
1137 
1138 	/* The mount points are the same for old and new paths, cf. EXDEV. */
1139 	if (old_dentry->d_parent == new_dir->dentry) {
1140 		/*
1141 		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
1142 		 * for same-directory referer (i.e. no reparenting).
1143 		 */
1144 		access_request_parent1 = landlock_init_layer_masks(
1145 			dom, access_request_parent1 | access_request_parent2,
1146 			&layer_masks_parent1, LANDLOCK_KEY_INODE);
1147 		if (is_access_to_paths_allowed(
1148 			    dom, new_dir, access_request_parent1,
1149 			    &layer_masks_parent1, NULL, 0, NULL, NULL))
1150 			return 0;
1151 		return -EACCES;
1152 	}
1153 
1154 	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
1155 	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
1156 
1157 	/* Saves the common mount point. */
1158 	mnt_dir.mnt = new_dir->mnt;
1159 	mnt_dir.dentry = new_dir->mnt->mnt_root;
1160 
1161 	/*
1162 	 * old_dentry may be the root of the common mount point and
1163 	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
1164 	 * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
1165 	 * we keep a reference to old_dentry.
1166 	 */
1167 	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
1168 						      old_dentry->d_parent;
1169 
1170 	/* new_dir->dentry is equal to new_dentry->d_parent */
1171 	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
1172 						&layer_masks_parent1);
1173 	allow_parent2 = collect_domain_accesses(
1174 		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
1175 
1176 	if (allow_parent1 && allow_parent2)
1177 		return 0;
1178 
1179 	/*
1180 	 * To be able to compare source and destination domain access rights,
1181 	 * take into account the @old_dentry access rights aggregated with its
1182 	 * parent access rights.  This will be useful to compare with the
1183 	 * destination parent access rights.
1184 	 */
1185 	if (is_access_to_paths_allowed(
1186 		    dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
1187 		    old_dentry, access_request_parent2, &layer_masks_parent2,
1188 		    exchange ? new_dentry : NULL))
1189 		return 0;
1190 
1191 	/*
1192 	 * This prioritizes EACCES over EXDEV for all actions, including
1193 	 * renames with RENAME_EXCHANGE.
1194 	 */
1195 	if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
1196 		   is_eacces(&layer_masks_parent2, access_request_parent2)))
1197 		return -EACCES;
1198 
1199 	/*
1200 	 * Gracefully forbids reparenting if the destination directory
1201 	 * hierarchy is not a superset of restrictions of the source directory
1202 	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
1203 	 * source or the destination.
1204 	 */
1205 	return -EXDEV;
1206 }
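
/*
 * In practice, the EXDEV/EACCES distinction above matters for user space:
 * tools such as mv(1) commonly fall back to a copy-and-unlink sequence when
 * rename(2) fails with EXDEV, so a reparenting denied only because of
 * inherited access rights can still be emulated if the destination allows
 * file creation.
 */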
1207 
1208 /* Inode hooks */
1209 
1210 static void hook_inode_free_security(struct inode *const inode)
1211 {
1212 	/*
1213 	 * All inodes must already have been untied from their object by
1214 	 * release_inode() or hook_sb_delete().
1215 	 */
1216 	WARN_ON_ONCE(landlock_inode(inode)->object);
1217 }
1218 
1219 /* Super-block hooks */
1220 
1221 /*
1222  * Release the inodes used in a security policy.
1223  *
1224  * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
1225  */
1226 static void hook_sb_delete(struct super_block *const sb)
1227 {
1228 	struct inode *inode, *prev_inode = NULL;
1229 
1230 	if (!landlock_initialized)
1231 		return;
1232 
1233 	spin_lock(&sb->s_inode_list_lock);
1234 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1235 		struct landlock_object *object;
1236 
1237 		/* Only handles referenced inodes. */
1238 		if (!atomic_read(&inode->i_count))
1239 			continue;
1240 
1241 		/*
1242 		 * Protects against concurrent modification of inode (e.g.
1243 		 * from get_inode_object()).
1244 		 */
1245 		spin_lock(&inode->i_lock);
1246 		/*
1247 		 * Checks I_FREEING and I_WILL_FREE to protect against a race
1248 		 * condition when release_inode() just called iput(), which
1249 		 * could lead to a NULL dereference of inode->security or a
1250 		 * second call to iput() for the same Landlock object.  Also
1251 		 * checks I_NEW because such inode cannot be tied to an object.
1252 		 */
1253 		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
1254 			spin_unlock(&inode->i_lock);
1255 			continue;
1256 		}
1257 
1258 		rcu_read_lock();
1259 		object = rcu_dereference(landlock_inode(inode)->object);
1260 		if (!object) {
1261 			rcu_read_unlock();
1262 			spin_unlock(&inode->i_lock);
1263 			continue;
1264 		}
1265 		/* Keeps a reference to this inode until the next loop walk. */
1266 		__iget(inode);
1267 		spin_unlock(&inode->i_lock);
1268 
1269 		/*
1270 		 * If there is no concurrent release_inode() ongoing, then we
1271 		 * are in charge of calling iput() on this inode, otherwise we
1272 		 * will just wait for it to finish.
1273 		 */
1274 		spin_lock(&object->lock);
1275 		if (object->underobj == inode) {
1276 			object->underobj = NULL;
1277 			spin_unlock(&object->lock);
1278 			rcu_read_unlock();
1279 
1280 			/*
1281 			 * Because object->underobj was not NULL,
1282 			 * release_inode() and get_inode_object() guarantee
1283 			 * that it is safe to reset
1284 			 * landlock_inode(inode)->object while it is not NULL.
1285 			 * It is therefore not necessary to lock inode->i_lock.
1286 			 */
1287 			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
1288 			/*
1289 			 * At this point, we own the ihold() reference that was
1290 			 * originally set up by get_inode_object() and the
1291 			 * __iget() reference that we just set in this loop
1292 			 * walk.  Therefore the following call to iput() will
1293 			 * not sleep nor drop the inode because there is now at
1294 			 * not sleep nor drop the inode because there are now at
1295 			 * least two references to it.
1296 			iput(inode);
1297 		} else {
1298 			spin_unlock(&object->lock);
1299 			rcu_read_unlock();
1300 		}
1301 
1302 		if (prev_inode) {
1303 			/*
1304 			 * At this point, we still own the __iget() reference
1305 			 * that we just set in this loop walk.  Therefore we
1306 			 * can drop the list lock and know that the inode won't
1307 			 * disappear from under us until the next loop walk.
1308 			 */
1309 			spin_unlock(&sb->s_inode_list_lock);
1310 			/*
1311 			 * We can now actually put the inode reference from the
1312 			 * previous loop walk, which is not needed anymore.
1313 			 */
1314 			iput(prev_inode);
1315 			cond_resched();
1316 			spin_lock(&sb->s_inode_list_lock);
1317 		}
1318 		prev_inode = inode;
1319 	}
1320 	spin_unlock(&sb->s_inode_list_lock);
1321 
1322 	/* Puts the inode reference from the last loop walk, if any. */
1323 	if (prev_inode)
1324 		iput(prev_inode);
1325 	/* Waits for pending iput() in release_inode(). */
1326 	wait_var_event(&landlock_superblock(sb)->inode_refs,
1327 		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
1328 }
1329 
1330 /*
1331  * Because a Landlock security policy is defined according to the filesystem
1332  * topology (i.e. the mount namespace), changing it may grant access to files
1333  * not previously allowed.
1334  *
1335  * To make it simple, deny any filesystem topology modification by landlocked
1336  * processes.  Non-landlocked processes may still change the namespace of a
1337  * landlocked process, but this kind of threat must be handled by a system-wide
1338  * access-control security policy.
1339  *
1340  * This could be lifted in the future if Landlock can safely handle mount
1341  * namespace updates requested by a landlocked process.  Indeed, we could
1342  * update the current domain (which is currently read-only) by taking into
1343  * account the accesses of the source and the destination of a new mount point.
1344  * However, it would also require making all the child domains dynamically
1345  * inherit these new constraints.  Anyway, for backward compatibility reasons,
1346  * a dedicated user space option would be required (e.g. as a ruleset flag).
1347  */
1348 static int hook_sb_mount(const char *const dev_name,
1349 			 const struct path *const path, const char *const type,
1350 			 const unsigned long flags, void *const data)
1351 {
1352 	if (!get_current_fs_domain())
1353 		return 0;
1354 	return -EPERM;
1355 }
1356 
1357 static int hook_move_mount(const struct path *const from_path,
1358 			   const struct path *const to_path)
1359 {
1360 	if (!get_current_fs_domain())
1361 		return 0;
1362 	return -EPERM;
1363 }
1364 
1365 /*
1366  * Removing a mount point may reveal a previously hidden file hierarchy, which
1367  * may then grant access to files that were previously forbidden.
1368  */
1369 static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
1370 {
1371 	if (!get_current_fs_domain())
1372 		return 0;
1373 	return -EPERM;
1374 }
1375 
1376 static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
1377 {
1378 	if (!get_current_fs_domain())
1379 		return 0;
1380 	return -EPERM;
1381 }
1382 
1383 /*
1384  * pivot_root(2), like mount(2), changes the current mount namespace.  It must
1385  * then be forbidden for a landlocked process.
1386  *
1387  * However, chroot(2) may be allowed because it only changes the relative root
1388  * directory of the current process.  Moreover, it can be used to restrict the
1389  * view of the filesystem.
1390  */
1391 static int hook_sb_pivotroot(const struct path *const old_path,
1392 			     const struct path *const new_path)
1393 {
1394 	if (!get_current_fs_domain())
1395 		return 0;
1396 	return -EPERM;
1397 }
1398 
1399 /* Path hooks */
1400 
1401 static int hook_path_link(struct dentry *const old_dentry,
1402 			  const struct path *const new_dir,
1403 			  struct dentry *const new_dentry)
1404 {
1405 	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
1406 					false);
1407 }
1408 
1409 static int hook_path_rename(const struct path *const old_dir,
1410 			    struct dentry *const old_dentry,
1411 			    const struct path *const new_dir,
1412 			    struct dentry *const new_dentry,
1413 			    const unsigned int flags)
1414 {
1415 	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
1416 	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
1417 					!!(flags & RENAME_EXCHANGE));
1418 }
1419 
1420 static int hook_path_mkdir(const struct path *const dir,
1421 			   struct dentry *const dentry, const umode_t mode)
1422 {
1423 	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
1424 }
1425 
1426 static int hook_path_mknod(const struct path *const dir,
1427 			   struct dentry *const dentry, const umode_t mode,
1428 			   const unsigned int dev)
1429 {
1430 	const struct landlock_ruleset *const dom = get_current_fs_domain();
1431 
1432 	if (!dom)
1433 		return 0;
1434 	return check_access_path(dom, dir, get_mode_access(mode));
1435 }
1436 
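/*
 * For reference (see get_mode_access() earlier in this file), the file type
 * passed to mknod(2) selects the access right that must be granted on the
 * parent directory: S_IFREG (or a zero mode) maps to
 * LANDLOCK_ACCESS_FS_MAKE_REG, S_IFCHR to LANDLOCK_ACCESS_FS_MAKE_CHAR,
 * S_IFBLK to LANDLOCK_ACCESS_FS_MAKE_BLOCK, S_IFIFO to
 * LANDLOCK_ACCESS_FS_MAKE_FIFO, and S_IFSOCK to LANDLOCK_ACCESS_FS_MAKE_SOCK.
 */
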
static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

static int hook_path_truncate(const struct path *const path)
{
	return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}

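/*
 * Illustrative user-space sketch (not part of the kernel build) of how the
 * path hooks above are reached.  The policy and paths are assumptions of this
 * sketch: a ruleset handling LANDLOCK_ACCESS_FS_MAKE_DIR with a single rule
 * granting it beneath /tmp lets hook_path_mkdir() allow mkdir(2) under /tmp
 * and deny it elsewhere with EACCES.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_DIR,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_MAKE_DIR,
	};
	const int ruleset_fd = syscall(SYS_landlock_create_ruleset,
				       &ruleset_attr, sizeof(ruleset_attr), 0);

	if (ruleset_fd < 0)
		return 1;
	path_beneath.parent_fd = open("/tmp", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(SYS_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return 1;
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		return 1;

	/* Allowed by the rule above. */
	if (mkdir("/tmp/allowed", 0700))
		perror("mkdir /tmp/allowed");
	/* Denied by hook_path_mkdir(): expected to fail with EACCES. */
	if (mkdir("/var/tmp/denied", 0700))
		perror("mkdir /var/tmp/denied");
	return 0;
}
#endif
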
/* File hooks */

/**
 * get_required_file_open_access - Get access needed to open a file
 *
 * @file: File being opened.
 *
 * Returns the access rights that are required for opening the given file,
 * depending on the file type and open mode.
 */
static access_mask_t
get_required_file_open_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}

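/*
 * Worked examples of the mapping above (illustrative, not exhaustive):
 * opening a regular file with O_RDONLY requires LANDLOCK_ACCESS_FS_READ_FILE,
 * with O_WRONLY requires LANDLOCK_ACCESS_FS_WRITE_FILE, and with O_RDWR
 * requires both; opening a directory (read-only by definition) requires
 * LANDLOCK_ACCESS_FS_READ_DIR; an execve(2)-style open additionally sets
 * __FMODE_EXEC and therefore also requires LANDLOCK_ACCESS_FS_EXECUTE; an
 * O_PATH open sets neither FMODE_READ nor FMODE_WRITE and so requires no
 * access right here (see the O_PATH note in hook_file_open()).
 */
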
static int hook_file_alloc_security(struct file *const file)
{
	/*
	 * Grants all access rights, even if most of them are not checked later
	 * on, for the sake of consistency.
	 *
	 * Notably, file descriptors for regular files can also be acquired
	 * without going through the file_open hook, for example when using
	 * memfd_create(2).
	 */
	landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
	return 0;
}

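/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * memfd_create(2) case mentioned above: such a file descriptor never goes
 * through hook_file_open(), so it keeps the full access mask granted here and
 * ftruncate(2) succeeds even under a policy that grants
 * LANDLOCK_ACCESS_FS_TRUNCATE nowhere.  The sample policy is an assumption of
 * this sketch.
 */
#if 0
#define _GNU_SOURCE
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_TRUNCATE,
	};
	const int ruleset_fd =
		syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
	int memfd;

	if (ruleset_fd < 0)
		return 1;
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	/* No rule added: truncation is denied on every filesystem path. */
	if (syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		return 1;

	memfd = memfd_create("scratch", MFD_CLOEXEC);
	if (memfd < 0)
		return 1;
	/* Succeeds: the file kept LANDLOCK_MASK_ACCESS_FS from allocation. */
	if (ftruncate(memfd, 4096))
		perror("ftruncate");
	return 0;
}
#endif
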
static bool is_device(const struct file *const file)
{
	const struct inode *inode = file_inode(file);

	return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
}

static int hook_file_open(struct file *const file)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
	access_mask_t open_access_request, full_access_request, allowed_access,
		optional_access;
	const struct landlock_ruleset *const dom =
		get_fs_domain(landlock_cred(file->f_cred)->domain);

	if (!dom)
		return 0;

	/*
	 * Because a file may be opened with O_PATH, get_required_file_open_access()
	 * may return 0.  This case will be handled with a future Landlock
	 * evolution.
	 */
	open_access_request = get_required_file_open_access(file);

	/*
	 * We look up more access than what we immediately need for open(), so
	 * that we can later authorize operations on opened files.
	 */
	optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
	if (is_device(file))
		optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;

	full_access_request = open_access_request | optional_access;

	if (is_access_to_paths_allowed(
		    dom, &file->f_path,
		    landlock_init_layer_masks(dom, full_access_request,
					      &layer_masks, LANDLOCK_KEY_INODE),
		    &layer_masks, NULL, 0, NULL, NULL)) {
		allowed_access = full_access_request;
	} else {
		unsigned long access_bit;
		const unsigned long access_req = full_access_request;

		/*
		 * Calculate the actual allowed access rights from layer_masks.
		 * Add to allowed_access each access right that has not been
		 * vetoed by any layer.
		 */
		allowed_access = 0;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(layer_masks)) {
			if (!layer_masks[access_bit])
				allowed_access |= BIT_ULL(access_bit);
		}
	}

	/*
	 * For operations on already opened files (e.g. ftruncate()), it is the
	 * access rights at the time of open() which decide whether the
	 * operation is permitted.  Therefore, we record the relevant subset of
	 * file access rights in the opened struct file.
	 */
	landlock_file(file)->allowed_access = allowed_access;

	if ((open_access_request & allowed_access) == open_access_request)
		return 0;

	return -EACCES;
}

static int hook_file_truncate(struct file *const file)
{
	/*
	 * Allows truncation if the truncate right was available at the time of
	 * opening the file, to get a consistent access check as for read, write
	 * and execute operations.
	 *
	 * Note: For checks done based on the file's Landlock allowed access, we
	 * enforce them independently of whether the current thread is in a
	 * Landlock domain, so that open files passed between independent
	 * processes retain their behaviour.
	 */
	if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
		return 0;
	return -EACCES;
}

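/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * rights-at-open-time semantics used by hook_file_truncate(): a descriptor
 * opened before Landlock enforcement keeps all access rights, while one
 * opened afterwards only keeps what the policy granted at open time.  The
 * policy and the /tmp/scratch path are assumptions of this sketch.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE |
				     LANDLOCK_ACCESS_FS_TRUNCATE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		/* Writing is granted beneath /tmp, truncation is not. */
		.allowed_access = LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	const int before_fd = open("/tmp/scratch", O_WRONLY | O_CREAT, 0600);
	const int ruleset_fd = syscall(SYS_landlock_create_ruleset,
				       &ruleset_attr, sizeof(ruleset_attr), 0);
	int after_fd;

	if (before_fd < 0 || ruleset_fd < 0)
		return 1;
	path_beneath.parent_fd = open("/tmp", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(SYS_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return 1;
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		return 1;

	/* Opened before enforcement: all rights were kept, this succeeds. */
	if (ftruncate(before_fd, 0))
		perror("ftruncate(before_fd)");

	/* Opened after enforcement: no truncate right was recorded. */
	after_fd = open("/tmp/scratch", O_WRONLY);
	if (after_fd < 0)
		return 1;
	/* Expected to fail with EACCES. */
	if (ftruncate(after_fd, 0))
		perror("ftruncate(after_fd)");
	return 0;
}
#endif
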
static int hook_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	access_mask_t allowed_access = landlock_file(file)->allowed_access;

	/*
	 * It is the access rights at the time of opening the file which
	 * determine whether IOCTL can be used on the opened file later.
	 *
	 * The access right is attached to the opened file in hook_file_open().
	 */
	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
		return 0;

	if (!is_device(file))
		return 0;

	if (is_masked_device_ioctl(cmd))
		return 0;

	return -EACCES;
}

static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	access_mask_t allowed_access = landlock_file(file)->allowed_access;

	/*
	 * It is the access rights at the time of opening the file which
	 * determine whether IOCTL can be used on the opened file later.
	 *
	 * The access right is attached to the opened file in hook_file_open().
	 */
	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
		return 0;

	if (!is_device(file))
		return 0;

	if (is_masked_device_ioctl_compat(cmd))
		return 0;

	return -EACCES;
}

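/*
 * Illustrative user-space sketch (not part of the kernel build) of the IOCTL
 * hooks above: under a ruleset handling LANDLOCK_ACCESS_FS_IOCTL_DEV with no
 * rule granting it, a device file opened after enforcement loses device
 * IOCTLs (EACCES), while commands masked by is_masked_device_ioctl(), such as
 * FIOCLEX, keep working.  The /dev/tty path and the TIOCGWINSZ command are
 * assumptions of this sketch.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_IOCTL_DEV,
	};
	const int ruleset_fd =
		syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
	struct winsize ws;
	int dev_fd;

	if (ruleset_fd < 0)
		return 1;
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (syscall(SYS_landlock_restrict_self, ruleset_fd, 0))
		return 1;

	/* Opened after enforcement: no LANDLOCK_ACCESS_FS_IOCTL_DEV right. */
	dev_fd = open("/dev/tty", O_RDWR | O_CLOEXEC);
	if (dev_fd < 0)
		return 1;

	/* Device-specific command: expected to fail with EACCES. */
	if (ioctl(dev_fd, TIOCGWINSZ, &ws))
		perror("ioctl(TIOCGWINSZ)");

	/* Masked command handled by do_vfs_ioctl(): still permitted. */
	if (ioctl(dev_fd, FIOCLEX))
		perror("ioctl(FIOCLEX)");
	return 0;
}
#endif
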
static struct security_hook_list landlock_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
	LSM_HOOK_INIT(path_truncate, hook_path_truncate),

	LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
	LSM_HOOK_INIT(file_open, hook_file_open),
	LSM_HOOK_INIT(file_truncate, hook_file_truncate),
	LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   &landlock_lsmid);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

/* clang-format off */
static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_no_more_access),
	KUNIT_CASE(test_scope_to_request_with_exec_none),
	KUNIT_CASE(test_scope_to_request_with_exec_some),
	KUNIT_CASE(test_scope_to_request_without_access),
	KUNIT_CASE(test_is_eacces_with_none),
	KUNIT_CASE(test_is_eacces_with_refer),
	KUNIT_CASE(test_is_eacces_with_write),
	{}
};
/* clang-format on */

static struct kunit_suite test_suite = {
	.name = "landlock_fs",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */