xref: /linux/kernel/user.c (revision 21ca59b3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to enforce per-user limits on system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.ns.count = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

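/*
 * Editorial note on the maps above: each extent maps the id range
 * [first, first + count) in this namespace onto
 * [lower_first, lower_first + count) in the parent, i.e.
 * id -> lower_first + (id - first). The single identity extent here
 * (first == lower_first == 0, count == 4294967295U) makes the
 * initial namespace map every possible uid/gid/projid to itself.
 * It has the same shape as a /proc/<pid>/uid_map line:
 *
 *	0 0 4294967295
 */
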
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
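
/*
 * Worked example (editorial note, assuming CONFIG_BASE_SMALL is 0,
 * so UIDHASH_BITS == 7 and UIDHASH_MASK == 127): for uid 1000,
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *			  == (7 + 1000) & 127
 *			  == 1007 & 127
 *			  == 111
 *
 * Folding the high bits into the low bits keeps uids that differ
 * only above the mask (e.g. 0 and 128) out of the same bucket.
 */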

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
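
/*
 * Illustrative pattern (editorial note, not in the original file):
 * the constraints above mean process-context users of this lock must
 * save and restore the interrupt state rather than using the _bh
 * variants:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... hash insert/remove/lookup ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * spin_lock_bh() would be wrong: local_bh_enable() with interrupts
 * already disabled may run softirq callbacks, which can re-enable
 * interrupts behind the caller's back.
 */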

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
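
/*
 * Illustrative usage (editorial note; the caller and the uid value
 * are hypothetical): every successful find_user() must be paired
 * with a free_uid() that drops the reference it took:
 *
 *	struct user_struct *up = find_user(KUIDT_INIT(1000));
 *
 *	if (up) {
 *		... inspect per-user state ...
 *		free_uid(up);
 *	}
 */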

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);
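
/*
 * Editorial note: refcount_dec_and_lock_irqsave() only returns true
 * when the count hit zero, and in that case it returns with
 * uidhash_lock held and interrupts disabled. Doing the final
 * decrement and the lock acquisition as one operation closes the
 * window in which a concurrent uid_hash_find() could take a new
 * reference on a user_struct whose count had already reached zero.
 * A naive split such as
 *
 *	if (refcount_dec_and_test(&up->__count)) {
 *		spin_lock_irqsave(&uidhash_lock, flags);
 *		free_user(up, flags);
 *	}
 *
 * would leave exactly that window open between the test and the
 * lock.
 */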

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
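
/*
 * Editorial note: alloc_uid() is the classic optimistic-allocation
 * pattern. kmem_cache_zalloc(..., GFP_KERNEL) may sleep, and
 * uidhash_lock is taken with interrupts disabled, so the allocation
 * must happen outside the lock; the hash is then re-checked under
 * the lock and the loser of any race throws its copy away. As a
 * sketch, with hypothetical helper names standing in for the code
 * above:
 *
 *	up = lookup();             // under the lock
 *	if (!up) {
 *		new = allocate();  // may sleep; no locks held
 *		up = lookup();     // re-check under the lock
 *		if (up)
 *			discard(new);             // lost the race
 *		else
 *			insert(new), up = new;    // won the race
 *	}
 *
 * Either way, exactly one object per uid ends up in the hash.
 */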

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);