/*-
 * Copyright (c) 2005,
 *     Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator, for debugging only,
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated.  It is used to detect tamper-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
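
/*
 * A rough usage sketch, not authoritative (see memguard(9); the option and
 * tunable names below are assumptions that may differ by release):
 *
 *	options DEBUG_MEMGUARD		# in the kernel configuration file
 *	vm.memguard.desc="temp"		# loader tunable (or sysctl), selects
 *					# the malloc(9) type to monitor by its
 *					# short description, e.g. M_TEMP
 */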

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

/*
 * The maximum number of pages allowed per allocation.  If you're using
 * MemGuard to override very large items (larger than MAX_PAGES_PER_ITEM
 * pages in size), you need to increase MAX_PAGES_PER_ITEM.
 */
#define	MAX_PAGES_PER_ITEM	64
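
/*
 * For example, assuming a 4 KB PAGE_SIZE (architecture-dependent), the
 * largest allocation MemGuard can service is 64 * 4 KB = 256 KB; a request
 * that rounds up to more pages than this panics in memguard_alloc() below.
 */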

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RD, &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp;
	char desc[128];
	long bytes;
	int error, i;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/*
	 * We can change the memory type only when no memory has been
	 * allocated for it or when there is no such memory type yet
	 * (i.e., it will be loaded with a kernel module).
	 */
	bytes = 0;
	mtx_lock(&malloc_mtx);
	mtp = malloc_desc2type(desc);
	if (mtp != NULL) {
		mtip = mtp->ks_handle;
		for (i = 0; i < MAXCPU; i++) {
			mtsp = &mtip->mti_stats[i];
			bytes += mtsp->mts_memalloced;
			bytes -= mtsp->mts_memfreed;
		}
	}
	if (bytes > 0)
		error = EBUSY;
	else {
		/*
		 * If mtp is NULL, it will be initialized in memguard_cmp().
		 */
		vm_memguard_mtype = mtp;
		strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	}
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
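
/*
 * Example, as a non-authoritative sketch of the handler above: selecting the
 * M_TEMP malloc type (short description "temp") succeeds only when no memory
 * is currently allocated from that type; otherwise EBUSY is returned:
 *
 *	# sysctl vm.memguard.desc=temp
 *	vm.memguard.desc: subproc -> temp
 *
 * The old value shown ("subproc") and the output format are illustrative only.
 */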

/*
 * Global MemGuard data.
 */
static vm_map_t memguard_map;
static unsigned long memguard_mapsize;
static unsigned long memguard_mapused;
struct memguard_entry {
	STAILQ_ENTRY(memguard_entry) entries;
	void *ptr;
};
static struct memguard_fifo {
	struct memguard_entry *stqh_first;
	struct memguard_entry **stqh_last;
	int index;
} memguard_fifo_pool[MAX_PAGES_PER_ITEM];

/*
 * Local prototypes.
 */
static void memguard_guard(void *addr, int numpgs);
static void memguard_unguard(void *addr, int numpgs);
static struct memguard_fifo *vtomgfifo(vm_offset_t va);
static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo);
static void vclrmgfifo(vm_offset_t va);

/*
 * Local macros.  MemGuard data is global, so replace these with whatever
 * your system uses to protect global data (if it is parallelized at the
 * kernel level).  This is for porting among BSDs.
 */
#define	MEMGUARD_CRIT_SECTION_DECLARE	static struct mtx memguard_mtx
#define	MEMGUARD_CRIT_SECTION_INIT				\
	mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF)
#define	MEMGUARD_CRIT_SECTION_ENTER	mtx_lock(&memguard_mtx)
#define	MEMGUARD_CRIT_SECTION_EXIT	mtx_unlock(&memguard_mtx)
MEMGUARD_CRIT_SECTION_DECLARE;

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map, unsigned long size)
{
	char *base, *limit;
	int i;

	/* round size up to a multiple of PAGE_SIZE */
	size /= PAGE_SIZE;
	size++;
	size *= PAGE_SIZE;

	memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base,
	    (vm_offset_t *)&limit, (vm_size_t)size, FALSE);
	memguard_map->system_map = 1;
	memguard_mapsize = size;
	memguard_mapused = 0;

	MEMGUARD_CRIT_SECTION_INIT;
	MEMGUARD_CRIT_SECTION_ENTER;
	for (i = 0; i < MAX_PAGES_PER_ITEM; i++) {
		STAILQ_INIT(&memguard_fifo_pool[i]);
		memguard_fifo_pool[i].index = i;
	}
	MEMGUARD_CRIT_SECTION_EXIT;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: %p\n", base);
	printf("\tMEMGUARD map limit: %p\n", limit);
	printf("\tMEMGUARD map size: %ld (Bytes)\n", size);
}
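
/*
 * A minimal caller sketch, not the authoritative FreeBSD startup code: the
 * malloc(9) initialization path is expected to carve the MemGuard submap out
 * of kmem_map, sized from vm_memguard_divisor, roughly along these lines
 * (the tunable name and surrounding variables are assumptions):
 *
 *	vm_memguard_divisor = 10;
 *	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
 *	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
 */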

/*
 * Allocate a single object of the specified size with the specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj;
	struct memguard_entry *e = NULL;
	int numpgs;

	numpgs = size / PAGE_SIZE;
	if ((size % PAGE_SIZE) != 0)
		numpgs++;
	if (numpgs > MAX_PAGES_PER_ITEM)
		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM " \
		    "in memguard.c (requested: %d pages)", numpgs);
	if (numpgs == 0)
		return NULL;

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from
	 * it and grab a new page, even if we have recycled pages in our
	 * FIFO.  This is because we wish to allow recycled pages to live
	 * guarded in the FIFO for as long as possible in order to catch
	 * even very late tamper-after-frees, even though it means that
	 * we end up wasting more memory; this is only a DEBUGGING
	 * allocator, after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e,
			    memguard_entry, entries);
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			memguard_unguard(obj, numpgs);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE * numpgs);
			return obj;
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: " \
			    "memguard_map too small");
		return NULL;
	}
	memguard_mapused += (PAGE_SIZE * numpgs);
	MEMGUARD_CRIT_SECTION_EXIT;

	obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags);
	if (obj != NULL) {
		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE * numpgs);
	} else {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * numpgs);
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return obj;
}
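
/*
 * Worked example of the bucket arithmetic above, assuming a 4 KB PAGE_SIZE:
 * a 5000-byte request rounds up to numpgs = 2, so it consumes 8 KB of the
 * submap and is recycled through memguard_fifo_pool[1]; a 100 KB request
 * needs 25 pages and uses memguard_fifo_pool[24].
 */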

/*
 * Free the specified single object.
 */
void
memguard_free(void *addr)
{
	struct memguard_entry *e;
	struct memguard_fifo *mgfifo;
	int idx;
	int *temp;

	addr = (void *)trunc_page((unsigned long)addr);

	/*
	 * The page should not be guarded by now, so force a write.  The
	 * purpose of this is to increase the likelihood of catching a
	 * double-free, though not necessarily a tamper-after-free (the
	 * second thread to free might not write before freeing, so this
	 * forces it to and, on a double-free, triggers a fault).
	 */
	temp = (int *)((unsigned long)addr + (PAGE_SIZE/2)); 	/* in page */
	*temp = 0xd34dc0d3;

	mgfifo = vtomgfifo((vm_offset_t)addr);
	idx = mgfifo->index;
	memguard_guard(addr, idx + 1);
	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
	if (e == NULL) {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * (idx + 1));
		MEMGUARD_CRIT_SECTION_EXIT;
		memguard_unguard(addr, idx + 1);	/* just in case */
		vclrmgfifo((vm_offset_t)addr);
		kmem_free(memguard_map, (vm_offset_t)addr,
		    PAGE_SIZE * (idx + 1));
		return;
	}
	e->ptr = addr;
	MEMGUARD_CRIT_SECTION_ENTER;
	STAILQ_INSERT_TAIL(mgfifo, e, entries);
	MEMGUARD_CRIT_SECTION_EXIT;
}
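
/*
 * Return non-zero if the given malloc(9) type is the one MemGuard has been
 * asked to monitor (compared by short description here, or by pointer in
 * the disabled alternative below).
 */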

int
memguard_cmp(struct malloc_type *mtp)
{

#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated at the
	 *    same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}
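
/*
 * A non-authoritative sketch of how the malloc(9) front end is expected to
 * use memguard_cmp() to divert allocations (the surrounding code lives in
 * kern_malloc.c; the exact form shown here is an assumption):
 *
 *	#ifdef DEBUG_MEMGUARD
 *		if (memguard_cmp(mtp))
 *			return (memguard_alloc(size, flags));
 *	#endif
 *
 * with the matching free() path calling memguard_free(addr) instead of
 * handing the memory back to UMA.
 */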

/*
 * Guard the page(s) containing the specified object (make them read-only
 * so that future writes to them fail).
 */
static void
memguard_guard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to guard page!");
}

/*
 * Unguard the page(s) containing the specified object (make them
 * read-and-write to allow full data access).
 */
static void
memguard_unguard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to unguard page!");
}

/*
 * vtomgfifo() converts a virtual address of the first page allocated for
 * an item to a memguard_fifo_pool reference for the corresponding item's
 * size.
 *
 * vsetmgfifo() sets a reference in an underlying page for the specified
 * virtual address to an appropriate memguard_fifo_pool.
 *
 * These routines are very similar to those defined by UMA in uma_int.h.
 * The difference is that these routines store the mgfifo in one of the
 * page's fields that is unused when the page is wired, rather than in the
 * object field, which is in use.
 */
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
{
	vm_page_t p;
	struct memguard_fifo *mgfifo;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vtomgfifo!"));
	mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
	return mgfifo;
}

static void
vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vsetmgfifo!"));
	p->pageq.tqe_next = (vm_page_t)mgfifo;
}

static void
vclrmgfifo(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vclrmgfifo!"));
	p->pageq.tqe_next = NULL;
}