xref: /linux/security/selinux/ss/ebitmap.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */
/*
 * Updated: Hewlett-Packard <paul@paul-moore.com>
 *
 *      Added support to import/export the NetLabel category bitmap
 *
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
 */
/*
 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
 *      Applied standard bit operations to improve bitmap scanning.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64	(sizeof(u64) * 8)

static struct kmem_cache *ebitmap_node_cachep;

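/*
 * The helpers below operate on a sorted, singly linked list of
 * struct ebitmap_node.  Each node covers EBITMAP_SIZE bits starting at
 * node->startbit, stored in maps[] as EBITMAP_UNIT_NUMS unsigned longs of
 * EBITMAP_UNIT_SIZE bits each (the constants are defined in ebitmap.h),
 * and e->highbit is maintained as one bit past the range covered by the
 * last node.
 */
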
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
	struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 &&
	       (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return 0;

	return 1;
}

int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
{
	struct ebitmap_node *n, *new, *prev;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}
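
/*
 * Typical use of the two helpers above (an illustrative sketch, not code
 * from this file; "tmp" and "src" are hypothetical locals):
 *
 *	struct ebitmap tmp;
 *
 *	if (ebitmap_cpy(&tmp, src))		// -ENOMEM on failure
 *		return -ENOMEM;
 *	WARN_ON(!ebitmap_cmp(&tmp, src));	// a fresh copy always matches
 *	...
 *	ebitmap_destroy(&tmp);			// release the copied nodes
 */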

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensible bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	unsigned long e_map;
	u32 offset;
	unsigned int iter;
	int rc;

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	if (*catmap != NULL)
		netlbl_catmap_free(*catmap);
	*catmap = NULL;

	while (e_iter) {
		offset = e_iter->startbit;
		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
			e_map = e_iter->maps[iter];
			if (e_map != 0) {
				rc = netlbl_catmap_setlong(catmap,
							   offset,
							   e_map,
							   GFP_ATOMIC);
				if (rc != 0)
					goto netlbl_export_failure;
			}
			offset += EBITMAP_UNIT_SIZE;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_catmap_free(*catmap);
	return -ENOMEM;
}

/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensible bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap *catmap)
{
	int rc;
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *e_prev = NULL;
	u32 offset = 0, idx;
	unsigned long bitmap;

	for (;;) {
		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
		if (rc < 0)
			goto netlbl_import_failure;
		if (offset == (u32)-1)
			return 0;

		/* don't waste ebitmap space if the netlabel bitmap is empty */
		if (bitmap == 0) {
			offset += EBITMAP_UNIT_SIZE;
			continue;
		}

		if (e_iter == NULL ||
		    offset >= e_iter->startbit + EBITMAP_SIZE) {
			e_prev = e_iter;
			e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
			if (e_iter == NULL)
				goto netlbl_import_failure;
			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
			if (e_prev == NULL)
				ebmap->node = e_iter;
			else
				e_prev->next = e_iter;
			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
		}

		/* offset will always be aligned to an unsigned long */
		idx = EBITMAP_NODE_INDEX(e_iter, offset);
		e_iter->maps[idx] = bitmap;

		/* next */
		offset += EBITMAP_UNIT_SIZE;
	}

	/* NOTE: we should never reach this return */
	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */
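
/*
 * The two NetLabel helpers above are inverses of each other: export feeds
 * every non-zero unsigned-long unit of the ebitmap to netlbl_catmap_setlong()
 * at its bit offset, and import rebuilds nodes from netlbl_catmap_getlong(),
 * skipping all-zero words so that empty ranges do not allocate nodes.
 * Offsets are exchanged in EBITMAP_UNIT_SIZE (unsigned long) granularity on
 * both sides.
 */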

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
	struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; i--)
			continue;	/* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}

		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}
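
/*
 * Worked example for ebitmap_contains() (illustrative values): if e1 has
 * bits {1, 3, 5} set and e2 has bits {3, 5} set, ebitmap_contains(e1, e2, 0)
 * returns 1, since every bit of e2 is also set in e1.  With last_e2bit == 4
 * the same call returns 0, because the highest bit set in e2 (bit 5) exceeds
 * last_e2bit.  The MLS code uses this helper for dominance checks between
 * category sets.
 */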

int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
{
	struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}

int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit
							     + EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kmem_cache_free(ebitmap_node_cachep, n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}
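
/*
 * Typical use of the bit accessors above (a sketch; the ebitmap "e" is a
 * hypothetical local): setting a bit allocates the node covering its
 * EBITMAP_SIZE-bit range if none exists yet, and clearing the last set bit
 * of a node frees that node again.
 *
 *	struct ebitmap e;
 *
 *	ebitmap_init(&e);
 *	if (ebitmap_set_bit(&e, 1000, 1))	// may fail with -ENOMEM
 *		return -ENOMEM;
 *	WARN_ON(!ebitmap_get_bit(&e, 1000));
 *	ebitmap_set_bit(&e, 1000, 0);		// the node is dropped again
 *	ebitmap_destroy(&e);
 */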

void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kmem_cache_free(ebitmap_node_cachep, temp);
	}

	e->highbit = 0;
	e->node = NULL;
	return;
}

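/*
 * On-disk layout handled by ebitmap_read() and ebitmap_write() below: a
 * header of three le32 values (the map unit size, which must equal
 * BITS_PER_U64; the highest bit; and the number of map entries), followed
 * by that many entries, each a le32 start bit that is a multiple of the
 * map unit plus a le64 word holding the 64 bits starting at that position.
 */
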
int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	u64 map;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		pr_err("SELinux: ebitmap: map size %u does not "
		       "match my size %zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);

	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}

	if (e->highbit && !count)
		goto bad;

	for (i = 0; i < count; i++) {
		rc = next_entry(&startbit, fp, sizeof(u32));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(startbit);

		if (startbit & (mapunit - 1)) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
			if (!tmp) {
				pr_err("SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			pr_err("SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&map, fp, sizeof(u64));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(map);

		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}

int ebitmap_write(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}

void __init ebitmap_cache_init(void)
{
	ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
							sizeof(struct ebitmap_node),
							0, SLAB_PANIC, NULL);
}