xref: /freebsd/sys/vm/memguard.c (revision 685dc743)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * MemGuard is a simple replacement allocator, for debugging only, which
 * provides ElectricFence-style memory barrier protection on objects being
 * allocated and is used to detect tamper-after-free scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
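
/*
 * A sketch of the usual workflow (the type name below is illustrative):
 * build the kernel with "options DEBUG_MEMGUARD", then select the malloc(9)
 * type to guard by its short description, e.g. in loader.conf(5):
 *
 *	vm.memguard.divisor=10
 *	vm.memguard.desc="ifnet"
 *
 * Allocations of the guarded type are diverted through MemGuard, which
 * unmaps freed pages so that stray accesses fault immediately instead of
 * silently corrupting memory.
 */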

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kernel_arena should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/* If mtp is NULL, it will be initialized in memguard_cmp_mtp(). */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
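
/*
 * Example (illustrative type name): switch the guarded type at runtime with
 *
 *	sysctl vm.memguard.desc=devbuf
 *
 * Changing the description only affects future allocations; memory already
 * guarded under the previous type is still freed through MemGuard, since
 * frees are routed by address (see is_memguard_addr()).
 */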

static int
memguard_sysctl_mapused(SYSCTL_HANDLER_ARGS)
{
	vmem_size_t size;

	size = vmem_size(memguard_arena, VMEM_ALLOC);
	return (sysctl_handle_long(oidp, &size, sizeof(size), req));
}

static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD_AROUND		0x001
#define MG_GUARD_ALLLARGE	0x002
#define MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

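/*
 * The options form a bitmask; for example, setting vm.memguard.options=3
 * (MG_GUARD_AROUND | MG_GUARD_ALLLARGE) both brackets each allocation with
 * guard pages and forces every allocation of a page or more through
 * MemGuard.
 */
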
static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kernel_arena.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	/* CTLFLAG_RDTUN doesn't work during the early boot process. */
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if the provided value is unreasonable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = vm_cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (size must be multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}
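
/*
 * A worked example with illustrative numbers: given a 16 GiB parent map,
 * 4 GiB of RAM (1,048,576 4 KiB pages), and the default divisor of 10,
 * memguard_physlimit becomes roughly 410 MiB and memguard_mapsize becomes
 * round_page(16 GiB / 10), about 1.6 GiB; that is well under the 2 * RAM
 * cap, so the kernel arena is grown by about 1.6 GiB of MemGuard KVA.
 */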

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single contiguous chunk of kernel address space that is managed
 * by a vmem arena.
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}
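
/*
 * MemGuard is wired up early in boot; a sketch of the caller (the exact
 * location may vary by version, but kern_malloc.c's kmeminit() does
 * essentially this when DEBUG_MEMGUARD is configured):
 *
 *	#ifdef DEBUG_MEMGUARD
 *	memguard_init(kernel_arena);
 *	#endif
 */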

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart",
	    CTLFLAG_RD, &memguard_base,
	    "MemGuard KVA base");
	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit",
	    CTLFLAG_RD, &memguard_mapsize,
	    "MemGuard KVA size");
	SYSCTL_ADD_PROC(NULL, parent, OID_AUTO, "mapused",
	    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_ULONG, NULL, 0,
	    memguard_sysctl_mapused, "LU", "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}

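/*
 * The two slots travel together: memguard_alloc() stores the caller's
 * request size via v2sizep() and the full KVA reservation (including any
 * guard pages) via v2sizev(), and memguard_free() reads both back to know
 * how much to unmap and how much to return to the arena.
 */
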
/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr, origaddr;
	u_long size_p, size_v;
	int do_guard, error, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);

	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  Save the value of memguard_options
	 * so that we use a consistent value throughout this function.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * malloc_large(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}

	/*
	 * Attempt to avoid address reuse for as long as possible, to increase
	 * the likelihood of catching a use-after-free.
	 */
	error = vmem_alloc(memguard_arena, size_v, M_NEXTFIT | M_NOWAIT,
	    &origaddr);
	if (error != 0) {
		memguard_fail_kva++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	addr = origaddr;
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kernel_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, origaddr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}
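
/*
 * Layout of a guarded allocation (MG_GUARD_AROUND set), sketched here for
 * a sub-page request:
 *
 *	[unmapped guard page][data page, pointer near the end][unmapped guard page]
 *
 * Overruns walk off the end of the data page into the trailing guard page
 * and fault; underruns and stale pointers hit unmapped KVA as well.
 */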

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to and,
	 * subsequently, trigger a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kernel_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}
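
/*
 * For example, setting vm.memguard.frequency=1000 makes each candidate
 * allocation pass through MemGuard with probability 1000/100000, i.e.
 * roughly 1% of eligible allocations are guarded at random.
 */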

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest way to compare is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated at the
	 *    same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    zone->uz_flags & UMA_ZONE_NOFREE)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest way to compare is to always compare the zone name,
	 * but it is also the slowest way.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}
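
/*
 * UMA zones can be guarded the same way as malloc types: set
 * vm.memguard.desc to the zone's name (e.g. "mbuf"; the name here is
 * illustrative).  Zones marked UMA_ZONE_NOFREE are skipped unless the
 * MG_GUARD_NOFREE option is set, since allocations from such zones are
 * never freed.
 */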

unsigned long
memguard_get_req_size(const void *addr)
{
	return (*v2sizep(trunc_page((uintptr_t)addr)));
}