xref: /linux/security/selinux/ss/sidtab.c (revision 52338415)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implementation of the SID table type.
4  *
5  * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
6  * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
7  *
8  * Copyright (C) 2018 Red Hat, Inc.
9  */
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/sched.h>
14 #include <linux/spinlock.h>
15 #include <asm/barrier.h>
16 #include "flask.h"
17 #include "security.h"
18 #include "sidtab.h"
19 
20 int sidtab_init(struct sidtab *s)
21 {
22 	u32 i;
23 
24 	memset(s->roots, 0, sizeof(s->roots));
25 
26 	/* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
27 	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
28 		s->rcache[i] = SIDTAB_MAX;
29 
30 	for (i = 0; i < SECINITSID_NUM; i++)
31 		s->isids[i].set = 0;
32 
33 	s->count = 0;
34 	s->convert = NULL;
35 
36 	spin_lock_init(&s->lock);
37 	return 0;
38 }
39 
40 int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
41 {
42 	struct sidtab_isid_entry *entry;
43 	int rc;
44 
45 	if (sid == 0 || sid > SECINITSID_NUM)
46 		return -EINVAL;
47 
48 	entry = &s->isids[sid - 1];
49 
50 	rc = context_cpy(&entry->context, context);
51 	if (rc)
52 		return rc;
53 
54 	entry->set = 1;
55 	return 0;
56 }
57 
58 static u32 sidtab_level_from_count(u32 count)
59 {
60 	u32 capacity = SIDTAB_LEAF_ENTRIES;
61 	u32 level = 0;
62 
63 	while (count > capacity) {
64 		capacity <<= SIDTAB_INNER_SHIFT;
65 		++level;
66 	}
67 	return level;
68 }
69 
70 static int sidtab_alloc_roots(struct sidtab *s, u32 level)
71 {
72 	u32 l;
73 
74 	if (!s->roots[0].ptr_leaf) {
75 		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
76 					       GFP_ATOMIC);
77 		if (!s->roots[0].ptr_leaf)
78 			return -ENOMEM;
79 	}
80 	for (l = 1; l <= level; ++l)
81 		if (!s->roots[l].ptr_inner) {
82 			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
83 							GFP_ATOMIC);
84 			if (!s->roots[l].ptr_inner)
85 				return -ENOMEM;
86 			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
87 		}
88 	return 0;
89 }
90 
/*
 * Return a pointer to the context slot for entry @index, walking the
 * tree from the root of the smallest level that can hold the index
 * down to the leaf.  If @alloc is nonzero, missing nodes along the
 * path (including roots) are allocated with GFP_ATOMIC (callers may
 * hold s->lock); if @alloc is zero, a missing node means the entry
 * does not exist and NULL is returned.
 */
static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		/*
		 * Select the child whose subtree covers leaf_index, then
		 * reduce leaf_index to an offset within that subtree.
		 */
		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
}
130 
131 static struct context *sidtab_lookup(struct sidtab *s, u32 index)
132 {
133 	/* read entries only after reading count */
134 	u32 count = smp_load_acquire(&s->count);
135 
136 	if (index >= count)
137 		return NULL;
138 
139 	return sidtab_do_lookup(s, index, 0);
140 }
141 
142 static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
143 {
144 	return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
145 }
146 
147 static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
148 {
149 	struct context *context;
150 
151 	if (sid != 0) {
152 		if (sid > SECINITSID_NUM)
153 			context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
154 		else
155 			context = sidtab_lookup_initial(s, sid);
156 		if (context && (!context->len || force))
157 			return context;
158 	}
159 
160 	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
161 }
162 
163 struct context *sidtab_search(struct sidtab *s, u32 sid)
164 {
165 	return sidtab_search_core(s, sid, 0);
166 }
167 
168 struct context *sidtab_search_force(struct sidtab *s, u32 sid)
169 {
170 	return sidtab_search_core(s, sid, 1);
171 }
172 
173 static int sidtab_find_context(union sidtab_entry_inner entry,
174 			       u32 *pos, u32 count, u32 level,
175 			       struct context *context, u32 *index)
176 {
177 	int rc;
178 	u32 i;
179 
180 	if (level != 0) {
181 		struct sidtab_node_inner *node = entry.ptr_inner;
182 
183 		i = 0;
184 		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
185 			rc = sidtab_find_context(node->entries[i],
186 						 pos, count, level - 1,
187 						 context, index);
188 			if (rc == 0)
189 				return 0;
190 			i++;
191 		}
192 	} else {
193 		struct sidtab_node_leaf *node = entry.ptr_leaf;
194 
195 		i = 0;
196 		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
197 			if (context_cmp(&node->entries[i].context, context)) {
198 				*index = *pos;
199 				return 0;
200 			}
201 			(*pos)++;
202 			i++;
203 		}
204 	}
205 	return -ENOENT;
206 }
207 
208 static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
209 {
210 	while (pos > 0) {
211 		WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
212 		--pos;
213 	}
214 	WRITE_ONCE(s->rcache[0], index);
215 }
216 
217 static void sidtab_rcache_push(struct sidtab *s, u32 index)
218 {
219 	sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
220 }
221 
222 static int sidtab_rcache_search(struct sidtab *s, struct context *context,
223 				u32 *index)
224 {
225 	u32 i;
226 
227 	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
228 		u32 v = READ_ONCE(s->rcache[i]);
229 
230 		if (v >= SIDTAB_MAX)
231 			continue;
232 
233 		if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
234 			sidtab_rcache_update(s, v, i);
235 			*index = v;
236 			return 0;
237 		}
238 	}
239 	return -ENOENT;
240 }
241 
/*
 * Find the index of @context in the table, inserting a new entry if it
 * is not present.  Fast paths (rcache hit, then a lock-free scan of
 * entries visible via the acquire-loaded count) avoid taking s->lock;
 * insertion happens under the lock.  While a policy conversion is in
 * progress (s->convert set), the converted context is also inserted
 * into the new table before the entry is published.
 *
 * Returns 0 with *@index set, -EOVERFLOW when the table is full, or
 * -ENOMEM / the conversion callback's error on insertion failure.
 */
static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
				 u32 *index)
{
	unsigned long flags;
	u32 count, count_locked, level, pos;
	struct sidtab_convert_params *convert;
	struct context *dst, *dst_convert;
	int rc;

	rc = sidtab_rcache_search(s, context, index);
	if (rc == 0)
		return 0;

	/* read entries only after reading count */
	count = smp_load_acquire(&s->count);
	level = sidtab_level_from_count(count);

	pos = 0;
	rc = sidtab_find_context(s->roots[level], &pos, count, level,
				 context, index);
	if (rc == 0) {
		sidtab_rcache_push(s, *index);
		return 0;
	}

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	convert = s->convert;
	count_locked = s->count;
	level = sidtab_level_from_count(count_locked);

	/* if count has changed before we acquired the lock, then catch up */
	while (count < count_locked) {
		if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
			sidtab_rcache_push(s, count);
			*index = count;
			rc = 0;
			goto out_unlock;
		}
		++count;
	}

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	rc = context_cpy(dst, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			/* undo the copy made above so no stale entry remains */
			context_destroy(dst);
			goto out_unlock;
		}

		rc = convert->func(context, dst_convert, convert->args);
		if (rc) {
			context_destroy(dst);
			goto out_unlock;
		}

		/* at this point we know the insert won't fail */
		convert->target->count = count + 1;
	}

	/* a nonzero len means the context string couldn't be mapped */
	if (context->len)
		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
			context->str);

	sidtab_rcache_push(s, count);
	*index = count;

	/* write entries before writing new count */
	smp_store_release(&s->count, count + 1);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
337 
338 int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
339 {
340 	int rc;
341 	u32 i;
342 
343 	for (i = 0; i < SECINITSID_NUM; i++) {
344 		struct sidtab_isid_entry *entry = &s->isids[i];
345 
346 		if (entry->set && context_cmp(context, &entry->context)) {
347 			*sid = i + 1;
348 			return 0;
349 		}
350 	}
351 
352 	rc = sidtab_reverse_lookup(s, context, sid);
353 	if (rc)
354 		return rc;
355 	*sid += SECINITSID_NUM + 1;
356 	return 0;
357 }
358 
359 static int sidtab_convert_tree(union sidtab_entry_inner *edst,
360 			       union sidtab_entry_inner *esrc,
361 			       u32 *pos, u32 count, u32 level,
362 			       struct sidtab_convert_params *convert)
363 {
364 	int rc;
365 	u32 i;
366 
367 	if (level != 0) {
368 		if (!edst->ptr_inner) {
369 			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
370 						  GFP_KERNEL);
371 			if (!edst->ptr_inner)
372 				return -ENOMEM;
373 		}
374 		i = 0;
375 		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
376 			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
377 						 &esrc->ptr_inner->entries[i],
378 						 pos, count, level - 1,
379 						 convert);
380 			if (rc)
381 				return rc;
382 			i++;
383 		}
384 	} else {
385 		if (!edst->ptr_leaf) {
386 			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
387 						 GFP_KERNEL);
388 			if (!edst->ptr_leaf)
389 				return -ENOMEM;
390 		}
391 		i = 0;
392 		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
393 			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
394 					   &edst->ptr_leaf->entries[i].context,
395 					   convert->args);
396 			if (rc)
397 				return rc;
398 			(*pos)++;
399 			i++;
400 		}
401 		cond_resched();
402 	}
403 	return 0;
404 }
405 
/*
 * Convert all existing entries into the new table described by
 * @params->target using @params->func.  The bulk of the conversion
 * runs outside the lock; entries added concurrently are handled by
 * the live-convert hook (s->convert) in sidtab_reverse_lookup().
 * Returns -EBUSY if a conversion is already in progress, -ENOMEM on
 * allocation failure, or the first conversion-callback error.
 */
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 *
	 * NOTE(review): count - 1 wraps around if count == 0 - looks
	 * like callers guarantee a non-empty table; confirm.
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely do the rest of the conversion outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux:  Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
	}
	return rc;
}
455 
456 static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
457 {
458 	u32 i;
459 
460 	if (level != 0) {
461 		struct sidtab_node_inner *node = entry.ptr_inner;
462 
463 		if (!node)
464 			return;
465 
466 		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
467 			sidtab_destroy_tree(node->entries[i], level - 1);
468 		kfree(node);
469 	} else {
470 		struct sidtab_node_leaf *node = entry.ptr_leaf;
471 
472 		if (!node)
473 			return;
474 
475 		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
476 			context_destroy(&node->entries[i].context);
477 		kfree(node);
478 	}
479 }
480 
481 void sidtab_destroy(struct sidtab *s)
482 {
483 	u32 i, level;
484 
485 	for (i = 0; i < SECINITSID_NUM; i++)
486 		if (s->isids[i].set)
487 			context_destroy(&s->isids[i].context);
488 
489 	level = SIDTAB_MAX_LEVEL;
490 	while (level && !s->roots[level].ptr_inner)
491 		--level;
492 
493 	sidtab_destroy_tree(s->roots[level], level);
494 }
495