/*	$NetBSD: subr_kmem.c,v 1.62 2016/02/29 00:34:17 chs Exp $	*/

/*-
 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory. This allocator has some debug features
 * enabled with "option DIAGNOSTIC" and "option DEBUG".
 */

/*
 * KMEM_SIZE: detect alloc/free size mismatch bugs.
 *	Prefix each allocation with a fixed-sized, aligned header and record
 *	the exact user-requested allocation size in it. When freeing, compare
 *	it with kmem_free's "size" argument.
 *
 * KMEM_REDZONE: detect overrun bugs.
 *	Add a 2-byte pattern (allocate one more memory chunk if needed) at the
 *	end of each allocated buffer. Check this pattern on kmem_free.
 *
 * These options are enabled on DIAGNOSTIC.
 *
 *  |CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
 *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  |/HSZ/|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  |/////|     |     |     |     |     |     |     |     |   |*|**|UU|
 *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+--+--+
 *  |Size |    Buffer usable by the caller (requested size)   |RedZ|Unused\
 */
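
/*
 * Example (illustrative): with KMEM_SIZE, the header records the size that
 * was requested at allocation time, so a mismatched free such as
 *
 *	p = kmem_alloc(32, KM_SLEEP);
 *	...
 *	kmem_free(p, 16);
 *
 * is caught by kmem_size_check(), which panics and reports the size that
 * was actually allocated.
 */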

/*
 * KMEM_POISON: detect modify-after-free bugs.
 *	Fill freed (in the sense of kmem_free) memory with a garbage pattern.
 *	Check the pattern on allocation.
 *
 * KMEM_GUARD
 *	A kernel with "option DEBUG" has the "kmem_guard" debugging feature
 *	compiled in. See the comment below for what kind of bugs it tries to
 *	detect. Even if compiled in, it's disabled by default because it's
 *	very expensive. You can enable it on boot by:
 *		boot -d
 *		db> w kmem_guard_depth 0t30000
 *		db> c
 *
 *	The default value of kmem_guard_depth is 0, which means disabled.
 *	It can be changed with the KMEM_GUARD_DEPTH kernel config option.
 */
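
/*
 * Illustrative kernel config fragment (sketch, not part of this file): a
 * DEBUG kernel can also enable the guard at build time by giving
 * KMEM_GUARD_DEPTH a non-zero value, e.g.:
 *
 *	options 	DEBUG
 *	options 	KMEM_GUARD_DEPTH=30000
 */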

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.62 2016/02/29 00:34:17 chs Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

struct kmem_cache_info {
	size_t		kc_size;
	const char *	kc_name;
};

static const struct kmem_cache_info kmem_cache_sizes[] = {
	{  8, "kmem-8" },
	{ 16, "kmem-16" },
	{ 24, "kmem-24" },
	{ 32, "kmem-32" },
	{ 40, "kmem-40" },
	{ 48, "kmem-48" },
	{ 56, "kmem-56" },
	{ 64, "kmem-64" },
	{ 80, "kmem-80" },
	{ 96, "kmem-96" },
	{ 112, "kmem-112" },
	{ 128, "kmem-128" },
	{ 160, "kmem-160" },
	{ 192, "kmem-192" },
	{ 224, "kmem-224" },
	{ 256, "kmem-256" },
	{ 320, "kmem-320" },
	{ 384, "kmem-384" },
	{ 448, "kmem-448" },
	{ 512, "kmem-512" },
	{ 768, "kmem-768" },
	{ 1024, "kmem-1024" },
	{ 0, NULL }
};

static const struct kmem_cache_info kmem_cache_big_sizes[] = {
	{ 2048, "kmem-2048" },
	{ 4096, "kmem-4096" },
	{ 8192, "kmem-8192" },
	{ 16384, "kmem-16384" },
	{ 0, NULL }
};

/*
 * KMEM_ALIGN is the smallest guaranteed alignment and also the
 * smallest allocatable quantum.
 * Every cache size >= CACHE_LINE_SIZE gets CACHE_LINE_SIZE alignment.
 */
#define	KMEM_ALIGN		8
#define	KMEM_SHIFT		3
#define	KMEM_MAXSIZE		1024
#define	KMEM_CACHE_COUNT	(KMEM_MAXSIZE >> KMEM_SHIFT)
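
/*
 * Worked example (illustrative): a 24-byte allocation (after rounding and
 * header overhead) maps to index (24 - 1) >> KMEM_SHIFT == 2 in kmem_cache[],
 * which kmem_create_caches() fills with the "kmem-24" pool cache.
 */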

static pool_cache_t kmem_cache[KMEM_CACHE_COUNT] __cacheline_aligned;
static size_t kmem_cache_maxidx __read_mostly;

#define	KMEM_BIG_ALIGN		2048
#define	KMEM_BIG_SHIFT		11
#define	KMEM_BIG_MAXSIZE	16384
#define	KMEM_CACHE_BIG_COUNT	(KMEM_BIG_MAXSIZE >> KMEM_BIG_SHIFT)

static pool_cache_t kmem_cache_big[KMEM_CACHE_BIG_COUNT] __cacheline_aligned;
static size_t kmem_cache_big_maxidx __read_mostly;

#if defined(DIAGNOSTIC) && defined(_HARDKERNEL)
#define	KMEM_SIZE
#define	KMEM_REDZONE
#endif /* defined(DIAGNOSTIC) && defined(_HARDKERNEL) */

#if defined(DEBUG) && defined(_HARDKERNEL)
#define	KMEM_SIZE
#define	KMEM_POISON
#define	KMEM_GUARD
static void *kmem_freecheck;
#endif /* defined(DEBUG) && defined(_HARDKERNEL) */

#if defined(KMEM_POISON)
static int kmem_poison_ctor(void *, void *, int);
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	2
static void kmem_redzone_fill(void *, size_t);
static void kmem_redzone_check(void *, size_t);
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#define	kmem_redzone_fill(p, sz)	/* nothing */
#define	kmem_redzone_check(p, sz)	/* nothing */
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
struct kmem_header {
	size_t		size;
} __aligned(KMEM_ALIGN);
#define	SIZE_SIZE	sizeof(struct kmem_header)
static void kmem_size_set(void *, size_t);
static void kmem_size_check(void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif

#if defined(KMEM_GUARD)
#ifndef KMEM_GUARD_DEPTH
#define KMEM_GUARD_DEPTH 0
#endif
struct kmem_guard {
	u_int		kg_depth;
	intptr_t *	kg_fifo;
	u_int		kg_rotor;
	vmem_t *	kg_vmem;
};

static bool	kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
static void kmem_guard_free(struct kmem_guard *, size_t, void *);

int kmem_guard_depth = KMEM_GUARD_DEPTH;
static bool kmem_guard_enabled;
static struct kmem_guard kmem_guard;
#endif /* defined(KMEM_GUARD) */

CTASSERT(KM_SLEEP == PR_WAITOK);
CTASSERT(KM_NOSLEEP == PR_NOWAIT);

/*
 * kmem_intr_alloc: allocate wired memory.
 */

void *
kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;
	uint8_t *p;

	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		return kmem_guard_alloc(&kmem_guard, requested_size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif
	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		/*
		 * If there isn't enough space in the padding, allocate
		 * one more memory chunk for the red zone.
		 */
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		int ret = uvm_km_kmem_alloc(kmem_va_arena,
		    (vsize_t)round_page(size),
		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
		     | VM_INSTANTFIT, (vmem_addr_t *)&p);
		if (ret) {
			return NULL;
		}
		FREECHECK_OUT(&kmem_freecheck, p);
		return p;
	}

	p = pool_cache_get(pc, kmflags);

	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, allocsz);
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, requested_size);
		kmem_redzone_fill(p, requested_size + SIZE_SIZE);

		return p + SIZE_SIZE;
	}
	return p;
}

/*
 * kmem_intr_zalloc: allocate zeroed wired memory.
 */

void *
kmem_intr_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_intr_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_intr_free: free wired memory allocated by kmem_alloc.
 */

void
kmem_intr_free(void *p, size_t requested_size)
{
	size_t allocsz, index;
	size_t size;
	pool_cache_t pc;

	KASSERT(p != NULL);
	KASSERT(requested_size > 0);

#ifdef KMEM_GUARD
	if (kmem_guard_enabled) {
		kmem_guard_free(&kmem_guard, requested_size, p);
		return;
	}
#endif

	size = kmem_roundup_size(requested_size);
	allocsz = size + SIZE_SIZE;

#ifdef KMEM_REDZONE
	if (size - requested_size < REDZONE_SIZE) {
		allocsz += kmem_roundup_size(REDZONE_SIZE);
	}
#endif

	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
	    < kmem_cache_maxidx) {
		pc = kmem_cache[index];
	} else if ((index = ((allocsz - 1) >> KMEM_BIG_SHIFT))
	    < kmem_cache_big_maxidx) {
		pc = kmem_cache_big[index];
	} else {
		FREECHECK_IN(&kmem_freecheck, p);
		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
		    round_page(size));
		return;
	}

	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, requested_size);
	kmem_redzone_check(p, requested_size + SIZE_SIZE);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_fill(p, allocsz);

	pool_cache_put(pc, p);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *v;

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	v = kmem_intr_alloc(size, kmflags);
	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
	return v;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *v;

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
	    "kmem(9) should not be used from the interrupt context");
	v = kmem_intr_zalloc(size, kmflags);
	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
	return v;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	kmem_intr_free(p, size);
}

static size_t
kmem_create_caches(const struct kmem_cache_info *array,
    pool_cache_t alloc_table[], size_t maxsize, int shift, int ipl)
{
	size_t maxidx = 0;
	size_t table_unit = (1 << shift);
	size_t size = table_unit;
	int i;

	for (i = 0; array[i].kc_size != 0 ; i++) {
		const char *name = array[i].kc_name;
		size_t cache_size = array[i].kc_size;
		struct pool_allocator *pa;
		int flags = PR_NOALIGN;
		pool_cache_t pc;
		size_t align;

		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
			align = CACHE_LINE_SIZE;
		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
			align = PAGE_SIZE;
		else
			align = KMEM_ALIGN;

		if (cache_size < CACHE_LINE_SIZE)
			flags |= PR_NOTOUCH;

		/* check if we reached the requested size */
		if (cache_size > maxsize || cache_size > PAGE_SIZE) {
			break;
		}
		if ((cache_size >> shift) > maxidx) {
			maxidx = cache_size >> shift;
		}

		pa = &pool_allocator_kmem;
#if defined(KMEM_POISON)
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, kmem_poison_ctor,
		    NULL, (void *)cache_size);
#else /* defined(KMEM_POISON) */
		pc = pool_cache_init(cache_size, align, 0, flags,
		    name, pa, ipl, NULL, NULL, NULL);
#endif /* defined(KMEM_POISON) */

		while (size <= cache_size) {
			alloc_table[(size - 1) >> shift] = pc;
			size += table_unit;
		}
	}
	return maxidx;
}

void
kmem_init(void)
{
#ifdef KMEM_GUARD
	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
	    kmem_va_arena);
#endif
	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
	    kmem_cache_big, PAGE_SIZE, KMEM_BIG_SHIFT, IPL_VM);
}

size_t
kmem_roundup_size(size_t size)
{
	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
}
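
/*
 * Worked example (illustrative), with KMEM_ALIGN == 8:
 *	kmem_roundup_size(20) == (20 + 7) & ~7 == 24
 *	kmem_roundup_size(24) == 24
 */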

/*
 * kmem_asprintf: allocate a string with kmem according to the given format.
 */
char *
kmem_asprintf(const char *fmt, ...)
{
	int size __diagused, len;
	va_list va;
	char *str;

	va_start(va, fmt);
	len = vsnprintf(NULL, 0, fmt, va);
	va_end(va);

	str = kmem_alloc(len + 1, KM_SLEEP);

	va_start(va, fmt);
	size = vsnprintf(str, len + 1, fmt, va);
	va_end(va);

	KASSERT(size == len);

	return str;
}
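
/*
 * Usage sketch (illustrative): the string is allocated with length
 * strlen(str) + 1, so it must be freed with that same size:
 *
 *	char *str = kmem_asprintf("%s%d", "kmem-", 8);
 *	...
 *	kmem_free(str, strlen(str) + 1);
 */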

/* ------------------ DEBUG / DIAGNOSTIC ------------------ */

#if defined(KMEM_POISON) || defined(KMEM_REDZONE)
#if defined(_LP64)
#define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
#define PRIME 0x9e3779b1
#endif /* defined(_LP64) */

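/*
 * The expected fill byte for an address is derived from the address itself
 * (multiplied by PRIME, keeping the top byte), so the poison and red zone
 * checks can recompute it instead of having to store the pattern anywhere.
 */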
static inline uint8_t
kmem_pattern_generate(const void *p)
{
	return (uint8_t)(((uintptr_t)p) * PRIME
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}
#endif /* defined(KMEM_POISON) || defined(KMEM_REDZONE) */

#if defined(KMEM_POISON)
static int
kmem_poison_ctor(void *arg, void *obj, int flag)
{
	size_t sz = (size_t)arg;

	kmem_poison_fill(obj, sz);

	return 0;
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_pattern_generate(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{
	struct kmem_header *hd;
	hd = (struct kmem_header *)p;
	hd->size = sz;
}

static void
kmem_size_check(void *p, size_t sz)
{
	struct kmem_header *hd;
	size_t hsz;

	hd = (struct kmem_header *)p;
	hsz = hd->size;

	if (hsz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz, hsz);
	}
}
#endif /* defined(KMEM_SIZE) */

#if defined(KMEM_REDZONE)
#define STATIC_BYTE	0xFE
CTASSERT(REDZONE_SIZE > 1);
static void
kmem_redzone_fill(void *p, size_t sz)
{
	uint8_t *cp, pat;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	/*
	 * We really don't want the first byte of the red zone to be '\0';
	 * an off-by-one in a string may not be properly detected.
	 */
	pat = kmem_pattern_generate(cp);
	*cp = (pat == '\0') ? STATIC_BYTE : pat;
	cp++;

	while (cp < ep) {
		*cp = kmem_pattern_generate(cp);
		cp++;
	}
}

static void
kmem_redzone_check(void *p, size_t sz)
{
	uint8_t *cp, pat, expected;
	const uint8_t *ep;

	cp = (uint8_t *)p + sz;
	ep = cp + REDZONE_SIZE;

	pat = kmem_pattern_generate(cp);
	expected = (pat == '\0') ? STATIC_BYTE : pat;
	if (expected != *cp) {
		panic("%s: %p: 0x%02x != 0x%02x\n",
		    __func__, cp, *cp, expected);
	}
	cp++;

	while (cp < ep) {
		expected = kmem_pattern_generate(cp);
		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}
#endif /* defined(KMEM_REDZONE) */


#if defined(KMEM_GUARD)
/*
 * The ultimate memory allocator for debugging, baby.  It tries to catch:
 *
 * 1. Overflow, in realtime. A guard page sits immediately after the
 *    requested area; a read/write overflow therefore triggers a page
 *    fault.
 * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
 *    just before the requested area, and holds the allocated size. Any
 *    difference with what is given at free triggers a panic.
 * 3. Underflow, at free. If an underflow occurs, the kmem header will be
 *    modified, and 2. will trigger a panic.
 * 4. Use-after-free. When freeing, the memory is unmapped, and depending
 *    on the value of kmem_guard_depth, the kernel will more or less delay
 *    the recycling of that memory. Which means that any subsequent
 *    read/write access to the memory will trigger a page fault, provided
 *    it hasn't been recycled yet.
 */
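
/*
 * Rough layout of a guarded allocation (illustrative):
 *
 *	| unused | header | object (requested_size) | unmapped guard page |
 *
 * The object is placed so that it ends (modulo alignment) right before the
 * guard page, so an overflow runs into unmapped memory and faults.
 */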

#include <sys/atomic.h>
#include <uvm/uvm.h>

static bool
kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
{
	vaddr_t va;

	/* If not enabled, we have nothing to do. */
	if (depth == 0) {
		return false;
	}
	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
	KASSERT(depth != 0);

	/*
	 * Allocate fifo.
	 */
	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0) {
		return false;
	}

	/*
	 * Init object.
	 */
	kg->kg_vmem = vm;
	kg->kg_fifo = (void *)va;
	kg->kg_depth = depth;
	kg->kg_rotor = 0;

	printf("kmem_guard(%p): depth %d\n", kg, depth);
	return true;
}

static void *
kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
{
	struct vm_page *pg;
	vm_flag_t flags;
	vmem_addr_t va;
	vaddr_t loopva;
	vsize_t loopsize;
	size_t size;
	void **p;

	/*
	 * Compute the size: take the kmem header into account, and add a guard
	 * page at the end.
	 */
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	/* Allocate pages of kernel VA, but do not map anything in yet. */
	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
		return NULL;
	}

	loopva = va;
	loopsize = size - PAGE_SIZE;

	while (loopsize) {
		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
		if (__predict_false(pg == NULL)) {
			if (waitok) {
				uvm_wait("kmem_guard");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(kg->kg_vmem, va, size);
				return NULL;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	/*
	 * Offset the returned pointer so that the unmapped guard page sits
	 * immediately after the returned object.
	 */
	p = (void **)((va + (size - PAGE_SIZE) - requested_size) &
	    ~(uintptr_t)ALIGNBYTES);
	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
	return (void *)p;
}

static void
kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
{
	vaddr_t va;
	u_int rotor;
	size_t size;
	uint8_t *ptr;

	ptr = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(ptr, requested_size);
	va = trunc_page((vaddr_t)ptr);
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));

	/*
	 * Unmap and free the pages. The last one is never allocated.
	 */
	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
	pmap_update(pmap_kernel());

#if 0
	/*
	 * XXX: Here, we need to atomically register the va and its size in the
	 * fifo.
	 */

	/*
	 * Put the VA allocation into the list and swap an old one out to free.
	 * This behaves mostly like a fifo.
	 */
	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
	if (va != 0) {
		vmem_free(kg->kg_vmem, va, size);
	}
#else
	(void)rotor;
	vmem_free(kg->kg_vmem, va, size);
#endif
}

#endif /* defined(KMEM_GUARD) */