1 /*
2  * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/byteorder.h>
30 #include <sys/kernel.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/kmem.h>
34 #include <sys/kmem_cache.h>
35 #include <sys/debug.h>
36 #include <sys/mutex.h>
37 #include <sys/vmmeter.h>
38 
39 
40 #include <vm/vm_page.h>
41 #include <vm/vm_object.h>
42 #include <vm/vm_kern.h>
43 #include <vm/vm_map.h>
44 
45 #ifdef KMEM_DEBUG
46 #include <sys/queue.h>
47 #include <sys/stack.h>
48 #endif
49 
50 #ifdef _KERNEL
51 MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
52 #else
53 #define	malloc(size, type, flags)	malloc(size)
54 #define	free(addr, type)		free(addr)
55 #endif
56 
57 #ifdef KMEM_DEBUG
/*
 * Per-allocation tracking header prepended to every zfs_kmem_alloc()
 * buffer when KMEM_DEBUG is enabled; used to report leaks in kmem_show().
 */
struct kmem_item {
	struct stack	stack;	/* caller's stack, saved at allocation time */
	LIST_ENTRY(kmem_item) next;	/* linkage on the global kmem_items list */
};
62 static LIST_HEAD(, kmem_item) kmem_items;
63 static struct mtx kmem_items_mtx;
64 MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
65 #endif	/* KMEM_DEBUG */
66 
67 #include <sys/vmem.h>
68 
/*
 * Allocate 'size' bytes via malloc(9) from the M_SOLARIS type.
 * 'kmflags' carries KM_SLEEP/KM_NOSLEEP semantics mapped onto malloc flags.
 * Under KMEM_DEBUG, a struct kmem_item header is prepended to each buffer
 * and recorded on a global list so leaks can be reported at unload.
 */
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
	void *p;
#ifdef KMEM_DEBUG
	struct kmem_item *i;

	/* Reserve room for the tracking header in front of the user data. */
	size += sizeof (struct kmem_item);
#endif
	/* Minimum request of 16 bytes — presumably to satisfy alignment /
	 * small-allocation expectations of callers; TODO confirm rationale. */
	p = malloc(MAX(size, 16), M_SOLARIS, kmflags);
#ifndef _KERNEL
	/* In userland a KM_SLEEP allocation is expected to never fail. */
	if (kmflags & KM_SLEEP)
		assert(p != NULL);
#endif
#ifdef KMEM_DEBUG
	if (p != NULL) {
		/* Fill in the header, then hand the caller the bytes after it. */
		i = p;
		p = (uint8_t *)p + sizeof (struct kmem_item);
		stack_save(&i->stack);
		mtx_lock(&kmem_items_mtx);
		LIST_INSERT_HEAD(&kmem_items, i, next);
		mtx_unlock(&kmem_items_mtx);
	}
#endif
	return (p);
}
95 
/*
 * Free a buffer obtained from zfs_kmem_alloc().  The 'size' argument is
 * unused in production builds; under KMEM_DEBUG it sizes the poison fill.
 * Under KMEM_DEBUG the pointer is rewound to the tracking header, which is
 * unlinked from the global list (asserting it was actually registered)
 * before the memory is poisoned with 0xDC and released.
 */
void
zfs_kmem_free(void *buf, size_t size __unused)
{
#ifdef KMEM_DEBUG
	if (buf == NULL) {
		printf("%s: attempt to free NULL\n", __func__);
		return;
	}
	struct kmem_item *i;

	/* Step back to the header that zfs_kmem_alloc() prepended. */
	buf = (uint8_t *)buf - sizeof (struct kmem_item);
	mtx_lock(&kmem_items_mtx);
	LIST_FOREACH(i, &kmem_items, next) {
		if (i == buf)
			break;
	}
	/* Freeing an untracked pointer is a caller bug. */
	ASSERT3P(i, !=, NULL);
	LIST_REMOVE(i, next);
	mtx_unlock(&kmem_items_mtx);
	/* Poison the freed region to catch use-after-free. */
	memset(buf, 0xDC, MAX(size, 16));
#endif
	free(buf, M_SOLARIS);
}
119 
120 static uint64_t kmem_size_val;
121 
122 static void
kmem_size_init(void * unused __unused)123 kmem_size_init(void *unused __unused)
124 {
125 
126 	kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
127 	if (kmem_size_val > vm_kmem_size)
128 		kmem_size_val = vm_kmem_size;
129 }
130 SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
131 
/*
 * Return the kmem size computed at boot by kmem_size_init():
 * min(physical memory, vm_kmem_size).
 */
uint64_t
kmem_size(void)
{

	return (kmem_size_val);
}
138 
139 static int
kmem_std_constructor(void * mem,int size __unused,void * private,int flags)140 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
141 {
142 	struct kmem_cache *cache = private;
143 
144 	return (cache->kc_constructor(mem, cache->kc_private, flags));
145 }
146 
147 static void
kmem_std_destructor(void * mem,int size __unused,void * private)148 kmem_std_destructor(void *mem, int size __unused, void *private)
149 {
150 	struct kmem_cache *cache = private;
151 
152 	cache->kc_destructor(mem, cache->kc_private);
153 }
154 
155 kmem_cache_t *
kmem_cache_create(const char * name,size_t bufsize,size_t align,int (* constructor)(void *,void *,int),void (* destructor)(void *,void *),void (* reclaim)(void *)__unused,void * private,vmem_t * vmp,int cflags)156 kmem_cache_create(const char *name, size_t bufsize, size_t align,
157     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
158     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
159 {
160 	kmem_cache_t *cache;
161 
162 	ASSERT3P(vmp, ==, NULL);
163 
164 	cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
165 	strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
166 	cache->kc_constructor = constructor;
167 	cache->kc_destructor = destructor;
168 	cache->kc_private = private;
169 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
170 	cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
171 	    constructor != NULL ? kmem_std_constructor : NULL,
172 	    destructor != NULL ? kmem_std_destructor : NULL,
173 	    NULL, NULL, align > 0 ? align - 1 : 0, cflags);
174 #else
175 	cache->kc_size = bufsize;
176 #endif
177 
178 	return (cache);
179 }
180 
/*
 * Destroy a cache created by kmem_cache_create(): tear down the backing
 * UMA zone (kernel builds) and release the cache descriptor itself.
 */
void
kmem_cache_destroy(kmem_cache_t *cache)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zdestroy(cache->kc_zone);
#endif
	kmem_free(cache, sizeof (*cache));
}
189 
190 void *
kmem_cache_alloc(kmem_cache_t * cache,int flags)191 kmem_cache_alloc(kmem_cache_t *cache, int flags)
192 {
193 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
194 	return (uma_zalloc_arg(cache->kc_zone, cache, flags));
195 #else
196 	void *p;
197 
198 	p = kmem_alloc(cache->kc_size, flags);
199 	if (p != NULL && cache->kc_constructor != NULL)
200 		kmem_std_constructor(p, cache->kc_size, cache, flags);
201 	return (p);
202 #endif
203 }
204 
/*
 * Return one object to the cache.  Kernel builds free into the UMA zone
 * (which invokes the dtor trampoline); otherwise run the destructor by
 * hand before releasing the raw memory.
 */
void
kmem_cache_free(kmem_cache_t *cache, void *buf)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zfree_arg(cache->kc_zone, buf, cache);
#else
	if (cache->kc_destructor != NULL)
		kmem_std_destructor(buf, cache->kc_size, cache);
	kmem_free(buf, cache->kc_size);
#endif
}
216 
217 /*
218  * Allow our caller to determine if there are running reaps.
219  *
220  * This call is very conservative and may return B_TRUE even when
221  * reaping activity isn't active. If it returns B_FALSE, then reaping
222  * activity is definitely inactive.
223  */
boolean_t
kmem_cache_reap_active(void)
{

	/* No async reap tasks are tracked on FreeBSD; always "inactive". */
	return (B_FALSE);
}
230 
231 /*
232  * Reap (almost) everything soon.
233  *
234  * Note: this does not wait for the reap-tasks to complete. Caller
235  * should use kmem_cache_reap_active() (above) and/or moderation to
236  * avoid scheduling too many reap-tasks.
237  */
238 #ifdef _KERNEL
/*
 * Ask UMA to drain this cache's zone.  The API changed in FreeBSD
 * 13 (1300043): uma_zone_reclaim() superseded zone_drain().  No-op
 * in KMEM_DEBUG builds, which have no backing zone.
 */
void
kmem_cache_reap_soon(kmem_cache_t *cache)
{
#ifndef KMEM_DEBUG
#if __FreeBSD_version >= 1300043
	uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
#else
	zone_drain(cache->kc_zone);
#endif
#endif
}
250 
/*
 * Global memory-pressure reap: trim all UMA zones.  Uses the
 * flag-taking uma_reclaim() signature on FreeBSD >= 13 (1300043).
 */
void
kmem_reap(void)
{
#if __FreeBSD_version >= 1300043
	uma_reclaim(UMA_RECLAIM_TRIM);
#else
	uma_reclaim();
#endif
}
260 #else
/* Userland stub: there is no UMA to drain outside the kernel. */
void
kmem_cache_reap_soon(kmem_cache_t *cache __unused)
{
}

/* Userland stub: global reaping is a no-op outside the kernel. */
void
kmem_reap(void)
{
}
270 #endif
271 
/* Solaris compatibility: report that kmem debugging features are off. */
int
kmem_debugging(void)
{
	return (0);
}
277 
278 void *
calloc(size_t n,size_t s)279 calloc(size_t n, size_t s)
280 {
281 	return (kmem_zalloc(n * s, KM_NOSLEEP));
282 }
283 
284 char *
kmem_vasprintf(const char * fmt,va_list adx)285 kmem_vasprintf(const char *fmt, va_list adx)
286 {
287 	char *msg;
288 	va_list adx2;
289 
290 	va_copy(adx2, adx);
291 	msg = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1, KM_SLEEP);
292 	(void) vsprintf(msg, fmt, adx2);
293 	va_end(adx2);
294 
295 	return (msg);
296 }
297 
298 #include <vm/uma.h>
299 #include <vm/uma_int.h>
300 #ifdef KMEM_DEBUG
301 #error "KMEM_DEBUG not currently supported"
302 #endif
303 
/* Number of objects currently allocated from the cache's UMA zone. */
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (uma_zone_get_cur(cache->kc_zone));
}
309 
/* Size in bytes of a single object in the cache's UMA zone. */
uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->kc_zone->uz_size);
}
315 
316 /*
317  * Register a move callback for cache defragmentation.
318  * XXX: Unimplemented but harmless to stub out for now.
319  */
void
spl_kmem_cache_set_move(kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	/* Stub: only validate the callback; defragmentation is unimplemented. */
	ASSERT3P(move, !=, NULL);
}
326 
327 #ifdef KMEM_DEBUG
328 void kmem_show(void *);
329 void
kmem_show(void * dummy __unused)330 kmem_show(void *dummy __unused)
331 {
332 	struct kmem_item *i;
333 
334 	mtx_lock(&kmem_items_mtx);
335 	if (LIST_EMPTY(&kmem_items))
336 		printf("KMEM_DEBUG: No leaked elements.\n");
337 	else {
338 		printf("KMEM_DEBUG: Leaked elements:\n\n");
339 		LIST_FOREACH(i, &kmem_items, next) {
340 			printf("address=%p\n", i);
341 			stack_print_ddb(&i->stack);
342 			printf("\n");
343 		}
344 	}
345 	mtx_unlock(&kmem_items_mtx);
346 }
347 
348 SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
349 #endif	/* KMEM_DEBUG */
350