/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of this file, the kmem_cache_* definitions are
 * removed to allow access to the real Linux slab allocator.
 */
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
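
/*
 * Illustrative note (added commentary, not functional code): the pattern
 * used later in this file is
 *
 *	smp_mb__before_atomic();
 *	clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
 *	smp_mb__after_atomic();
 *
 * which, on pre-3.16 kernels, expands via the wrappers above to the old
 * smp_mb__{before,after}_clear_bit() barriers.
 */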

/* BEGIN CSTYLED */
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released, this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e., per CPU).  Magazines
 * may never be entirely disabled in this implementation.
 */
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");
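
/*
 * Illustrative example (configuration sketch, not part of this file): the
 * parameter above is read-only at runtime (0444), so it would normally be
 * set at module load time, e.g. with a modprobe option such as
 *
 *	options spl spl_kmem_cache_magazine_size=64
 *
 * in a modprobe.d configuration file.  The module name "spl" is assumed
 * here purely for illustration.
 */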

/*
 * The default behavior is to report the number of objects remaining in the
 * cache.  This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations.  Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed.  This may increase the likelihood of out of memory events.
 */
static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");
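
/*
 * Illustrative example (assumed sysfs path, for illustration only): because
 * spl_kmem_cache_slab_limit is writable (0644), it could also be tuned at
 * runtime through the standard module parameter interface, e.g.
 *
 *	echo 32768 > /sys/module/spl/parameters/spl_kmem_cache_slab_limit
 *
 * assuming the module is named "spl".
 */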

/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32-bit architectures.  This will
 * seriously constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;   /* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef	HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
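
/*
 * Worked example (hypothetical numbers, for illustration only): with an
 * 8-byte skc_obj_align, a 500-byte skc_obj_size, and assuming
 * sizeof (spl_kmem_obj_t) is 40 bytes, spl_obj_size() would return
 * P2ROUNDUP(500, 8) + P2ROUNDUP(40, 8) = 504 + 40 = 544 bytes per object,
 * i.e. the object padded to its alignment plus its trailing spl_kmem_obj_t.
 */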

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Look up the spl_kmem_obj_t metadata for a given object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

/*
 * Remove a slab from the complete or partial list.  This must be called
 * with the 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance: use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}
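
/*
 * Worked example (hypothetical numbers, for illustration only): assume an
 * aligned obj_size of 128 KiB, an aligned sks_size of 64 bytes, a target of
 * spl_kmem_cache_obj_per_slab = 8 objects, and spl_kmem_cache_max_size = 32
 * (a 32 MiB cap).  Then tgt_size = 8 * 131072 + 64 = 1048640 bytes, well
 * under max_size, so the slab keeps its target of 8 objects and is sized at
 * roughly 1 MiB.  Only if tgt_size exceeded the cap would the object count
 * be reduced to fit.
 */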

/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4 KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;  /* Minimum 4 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16; /* Minimum 2 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64; /* Minimum 256 KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128; /* Minimum 128 KiB per-magazine */
	else
		size = 256;

	return (size);
}

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM       Force kvmem backed SPL cache
 *	KMC_SLAB        Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *
 * An illustrative usage sketch follows this function below.
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		/* Don't leak the name buffer allocated above */
		kfree(skc->skc_name);
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE))
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of SLAB_USERCOPY flag
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
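
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	kmem_cache_t *cache = spl_kmem_cache_create("my_cache",
 *	    sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *	void *obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 *
 * Here my_obj_t, my_ctor, and my_dtor are hypothetical names.  Based on how
 * the callbacks are invoked in this file, a ctor is called with
 * (obj, private, flags) and a dtor with (obj, private).
 */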

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep, attempt an emergency allocation to satisfy
	 * the request.  The only alternative is to fail the allocation, but
	 * it's preferable to try.  The use of KM_NOSLEEP is expected to be
	 * rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of context switch
	 * and improve NUMA locality, by trying to allocate a new slab in the
	 * current process context with KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);

/*
 * Free an object back to the local per-cpu magazine.  There is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush objects from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache
	 * and return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released, a reap may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}