1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 *
10 * The SPL is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * The SPL is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #define SPL_KMEM_CACHE_IMPLEMENTING
25
26 #include <linux/percpu_compat.h>
27 #include <sys/kmem.h>
28 #include <sys/kmem_cache.h>
29 #include <sys/taskq.h>
30 #include <sys/timer.h>
31 #include <sys/vmem.h>
32 #include <sys/wait.h>
33 #include <sys/string.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/prefetch.h>
37
38 /*
39 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
40 * with smp_mb__{before,after}_atomic() because they were redundant. This is
41 * only used inside our SLAB allocator, so we implement an internal wrapper
42 * here to give us smp_mb__{before,after}_atomic() on older kernels.
43 */
44 #ifndef smp_mb__before_atomic
45 #define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
46 #endif
47
48 #ifndef smp_mb__after_atomic
49 #define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
50 #endif
51
52 /* BEGIN CSTYLED */
53 /*
54 * Cache magazines are an optimization designed to minimize the cost of
55 * allocating memory. They do this by keeping a per-cpu cache of recently
56 * freed objects, which can then be reallocated without taking a lock. This
57 * can improve performance on highly contended caches. However, because
58 * objects in magazines will prevent otherwise empty slabs from being
59 * immediately released, this may not be ideal for low memory machines.
60 *
61 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
62 * magazine size. When this value is set to 0 the magazine size will be
63 * automatically determined based on the object size. Otherwise magazines
64 * will be limited to 2-256 objects per magazine (i.e. per CPU). Magazines
65 * may never be entirely disabled in this implementation.
66 */
67 static unsigned int spl_kmem_cache_magazine_size = 0;
68 module_param(spl_kmem_cache_magazine_size, uint, 0444);
69 MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
70 "Default magazine size (2-256), set automatically (0)");
71
72 static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
73 module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
74 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
75
76 static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
77 module_param(spl_kmem_cache_max_size, uint, 0644);
78 MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
79
80 /*
81 * For small objects the Linux slab allocator should be used to make the most
82 * efficient use of the memory. However, large objects are not supported by
83 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
84 * of 16K was determined to be optimal for architectures using 4K pages and
85 * to also work well on architectures using larger 64K page sizes.
86 */
87 static unsigned int spl_kmem_cache_slab_limit =
88 SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
89 module_param(spl_kmem_cache_slab_limit, uint, 0644);
90 MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
91 "Objects less than N bytes use the Linux slab");
92
93 /*
94 * The number of threads available to allocate new slabs for caches. This
95 * should not need to be tuned but it is available for performance analysis.
96 */
97 static unsigned int spl_kmem_cache_kmem_threads = 4;
98 module_param(spl_kmem_cache_kmem_threads, uint, 0444);
99 MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
100 "Number of spl_kmem_cache threads");
101 /* END CSTYLED */
102
103 /*
104 * Slab allocation interfaces
105 *
106 * While the Linux slab implementation was inspired by the Solaris
107 * implementation I cannot use it to emulate the Solaris APIs. I
108 * require two features which are not provided by the Linux slab.
109 *
110 * 1) Constructors AND destructors. Recent versions of the Linux
111 * kernel have removed support for destructors. This is a deal
112 * breaker for the SPL which contains particularly expensive
113 * initializers for mutexes, condition variables, etc. We also
114 * require a minimal level of cleanup for these data types unlike
115 * many Linux data types which do not need to be explicitly destroyed.
116 *
117 * 2) Virtual address space backed slab. Callers of the Solaris slab
118 * expect it to work well for both small and very large allocations.
119 * Because of memory fragmentation the Linux slab which is backed
120 * by kmalloc'ed memory performs very badly when confronted with
121 * large numbers of large allocations. Basing the slab on the
122 * virtual address space removes the need for contiguous pages
123 * and greatly improves performance for large allocations.
124 *
125 * For these reasons, the SPL has its own slab implementation with
126 * the needed features. It is not as highly optimized as either the
127 * Solaris or Linux slabs, but it should get me most of what is
128 * needed until it can be optimized or obsoleted by another approach.
129 *
130 * One serious concern I do have about this method is the relatively
131 * small virtual address space on 32bit arches. This will seriously
132 * constrain the size of the slab caches and their performance.
133 */
134
135 struct list_head spl_kmem_cache_list; /* List of caches */
136 struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
137 static taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
138
139 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
140
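/*
 * Allocate virtually contiguous, page aligned memory used to back a slab.
 */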
141 static void *
142 kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
143 {
144 gfp_t lflags = kmem_flags_convert(flags);
145 void *ptr;
146
147 ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
148
149 /* Resulting allocated memory will be page aligned */
150 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
151
152 return (ptr);
153 }
154
155 static void
156 kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
157 {
158 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
159
160 /*
161 * The Linux direct reclaim path uses this out of band value to
162 * determine if forward progress is being made. Normally this is
163 * incremented by kmem_freepages() which is part of the various
164 * Linux slab implementations. However, since we are using none
165 * of that infrastructure we are responsible for incrementing it.
166 */
167 if (current->reclaim_state)
168 #ifdef HAVE_RECLAIM_STATE_RECLAIMED
169 current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
170 #else
171 current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
172 #endif
173 vfree(ptr);
174 }
175
176 /*
177 * Required space for each aligned sks.
178 */
179 static inline uint32_t
180 spl_sks_size(spl_kmem_cache_t *skc)
181 {
182 return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
183 skc->skc_obj_align, uint32_t));
184 }
185
186 /*
187 * Required space for each aligned object.
188 */
189 static inline uint32_t
190 spl_obj_size(spl_kmem_cache_t *skc)
191 {
192 uint32_t align = skc->skc_obj_align;
193
194 return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
195 P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
196 }
197
198 uint64_t
199 spl_kmem_cache_inuse(kmem_cache_t *cache)
200 {
201 return (cache->skc_obj_total);
202 }
203 EXPORT_SYMBOL(spl_kmem_cache_inuse);
204
205 uint64_t
206 spl_kmem_cache_entry_size(kmem_cache_t *cache)
207 {
208 return (cache->skc_obj_size);
209 }
210 EXPORT_SYMBOL(spl_kmem_cache_entry_size);
211
212 /*
213 * Look up the spl_kmem_obj_t for an object given that object.
214 */
215 static inline spl_kmem_obj_t *
216 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
217 {
218 return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
219 skc->skc_obj_align, uint32_t));
220 }
221
222 /*
223 * It's important that we pack the spl_kmem_obj_t structure and the
224 * actual objects into one large address space to minimize the number
225 * of calls to the allocator. It is far better to do a few large
226 * allocations and then subdivide them ourselves. Now which allocator
227 * we use requires balancing a few trade offs.
228 *
229 * For small objects we use kmem_alloc() because as long as you are
230 * only requesting a small number of pages (ideally just one) it's cheap.
231 * However, when you start requesting multiple pages with kmem_alloc()
232 * it gets increasingly expensive since it requires contiguous pages.
233 * For this reason we shift to vmem_alloc() for slabs of large objects
234 * which removes the need for contiguous pages. We do not use
235 * vmem_alloc() in all cases because there is significant locking
236 * overhead in __get_vm_area_node(). This function takes a single
237 * global lock when acquiring an available virtual address range which
238 * serializes all vmem_alloc() calls for all slab caches. Using slightly
239 * different allocation functions for small and large objects should
240 * give us the best of both worlds.
241 *
242 * +------------------------+
243 * | spl_kmem_slab_t --+-+ |
244 * | skc_obj_size <-+ | |
245 * | spl_kmem_obj_t | |
246 * | skc_obj_size <---+ |
247 * | spl_kmem_obj_t | |
248 * | ... v |
249 * +------------------------+
250 */
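/*
 * Illustrative address arithmetic for the layout above (this simply
 * restates what spl_slab_alloc() and spl_sko_from_obj() compute):
 *
 *   obj_i = base + spl_sks_size(skc) + i * spl_obj_size(skc);
 *   sko_i = obj_i + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
 *
 * That is, each object is immediately followed by its spl_kmem_obj_t
 * bookkeeping structure, with both components rounded up to the cache's
 * object alignment.
 */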
251 static spl_kmem_slab_t *
252 spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
253 {
254 spl_kmem_slab_t *sks;
255 void *base;
256 uint32_t obj_size;
257
258 base = kv_alloc(skc, skc->skc_slab_size, flags);
259 if (base == NULL)
260 return (NULL);
261
262 sks = (spl_kmem_slab_t *)base;
263 sks->sks_magic = SKS_MAGIC;
264 sks->sks_objs = skc->skc_slab_objs;
265 sks->sks_age = jiffies;
266 sks->sks_cache = skc;
267 INIT_LIST_HEAD(&sks->sks_list);
268 INIT_LIST_HEAD(&sks->sks_free_list);
269 sks->sks_ref = 0;
270 obj_size = spl_obj_size(skc);
271
272 for (int i = 0; i < sks->sks_objs; i++) {
273 void *obj = base + spl_sks_size(skc) + (i * obj_size);
274
275 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
276 spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
277 sko->sko_addr = obj;
278 sko->sko_magic = SKO_MAGIC;
279 sko->sko_slab = sks;
280 INIT_LIST_HEAD(&sko->sko_list);
281 list_add_tail(&sko->sko_list, &sks->sks_free_list);
282 }
283
284 return (sks);
285 }
286
287 /*
288 * Remove a slab from the complete or partial list. It must be called with
289 * the 'skc->skc_lock' held but the actual free must be performed
290 * outside the lock to prevent deadlocking on vmem addresses.
291 */
292 static void
293 spl_slab_free(spl_kmem_slab_t *sks,
294 struct list_head *sks_list, struct list_head *sko_list)
295 {
296 spl_kmem_cache_t *skc;
297
298 ASSERT(sks->sks_magic == SKS_MAGIC);
299 ASSERT(sks->sks_ref == 0);
300
301 skc = sks->sks_cache;
302 ASSERT(skc->skc_magic == SKC_MAGIC);
303
304 /*
305 * Update slab/objects counters in the cache, then remove the
306 * slab from the skc->skc_partial_list. Finally add the slab
307 * and all its objects into the private work lists where the
308 * destructors will be called and the memory freed to the system.
309 */
310 skc->skc_obj_total -= sks->sks_objs;
311 skc->skc_slab_total--;
312 list_del(&sks->sks_list);
313 list_add(&sks->sks_list, sks_list);
314 list_splice_init(&sks->sks_free_list, sko_list);
315 }
316
317 /*
318 * Reclaim empty slabs at the end of the partial list.
319 */
320 static void
321 spl_slab_reclaim(spl_kmem_cache_t *skc)
322 {
323 spl_kmem_slab_t *sks = NULL, *m = NULL;
324 spl_kmem_obj_t *sko = NULL, *n = NULL;
325 LIST_HEAD(sks_list);
326 LIST_HEAD(sko_list);
327
328 /*
329 * Empty slabs and objects must be moved to a private list so they
330 * can be safely freed outside the spin lock. All empty slabs are
331 * at the end of skc->skc_partial_list, therefore once a non-empty
332 * slab is found we can stop scanning.
333 */
334 spin_lock(&skc->skc_lock);
335 list_for_each_entry_safe_reverse(sks, m,
336 &skc->skc_partial_list, sks_list) {
337
338 if (sks->sks_ref > 0)
339 break;
340
341 spl_slab_free(sks, &sks_list, &sko_list);
342 }
343 spin_unlock(&skc->skc_lock);
344
345 /*
346 * The following two loops ensure all the object destructors are run,
347 * and the slabs themselves are freed. This is all done outside the
348 * skc->skc_lock since this allows the destructor to sleep, and
349 * allows us to perform a conditional reschedule when freeing a
350 * large number of objects and slabs back to the system.
351 */
352
353 list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
354 ASSERT(sko->sko_magic == SKO_MAGIC);
355 }
356
357 list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
358 ASSERT(sks->sks_magic == SKS_MAGIC);
359 kv_free(skc, sks, skc->skc_slab_size);
360 }
361 }
362
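/*
 * Find the emergency object tracking node for the given object address in
 * the cache's red-black tree, or return NULL if it is not an emergency
 * object.
 */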
363 static spl_kmem_emergency_t *
364 spl_emergency_search(struct rb_root *root, void *obj)
365 {
366 struct rb_node *node = root->rb_node;
367 spl_kmem_emergency_t *ske;
368 unsigned long address = (unsigned long)obj;
369
370 while (node) {
371 ske = container_of(node, spl_kmem_emergency_t, ske_node);
372
373 if (address < ske->ske_obj)
374 node = node->rb_left;
375 else if (address > ske->ske_obj)
376 node = node->rb_right;
377 else
378 return (ske);
379 }
380
381 return (NULL);
382 }
383
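/*
 * Insert an emergency object tracking node keyed by object address.
 * Returns 1 on success and 0 if the address is already present.
 */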
384 static int
385 spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
386 {
387 struct rb_node **new = &(root->rb_node), *parent = NULL;
388 spl_kmem_emergency_t *ske_tmp;
389 unsigned long address = ske->ske_obj;
390
391 while (*new) {
392 ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
393
394 parent = *new;
395 if (address < ske_tmp->ske_obj)
396 new = &((*new)->rb_left);
397 else if (address > ske_tmp->ske_obj)
398 new = &((*new)->rb_right);
399 else
400 return (0);
401 }
402
403 rb_link_node(&ske->ske_node, parent, new);
404 rb_insert_color(&ske->ske_node, root);
405
406 return (1);
407 }
408
409 /*
410 * Allocate a single emergency object and track it in a red black tree.
411 */
412 static int
413 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
414 {
415 gfp_t lflags = kmem_flags_convert(flags);
416 spl_kmem_emergency_t *ske;
417 int order = get_order(skc->skc_obj_size);
418 int empty;
419
420 /* Last chance: use a partial slab if one now exists */
421 spin_lock(&skc->skc_lock);
422 empty = list_empty(&skc->skc_partial_list);
423 spin_unlock(&skc->skc_lock);
424 if (!empty)
425 return (-EEXIST);
426
427 ske = kmalloc(sizeof (*ske), lflags);
428 if (ske == NULL)
429 return (-ENOMEM);
430
431 ske->ske_obj = __get_free_pages(lflags, order);
432 if (ske->ske_obj == 0) {
433 kfree(ske);
434 return (-ENOMEM);
435 }
436
437 spin_lock(&skc->skc_lock);
438 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
439 if (likely(empty)) {
440 skc->skc_obj_total++;
441 skc->skc_obj_emergency++;
442 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
443 skc->skc_obj_emergency_max = skc->skc_obj_emergency;
444 }
445 spin_unlock(&skc->skc_lock);
446
447 if (unlikely(!empty)) {
448 free_pages(ske->ske_obj, order);
449 kfree(ske);
450 return (-EINVAL);
451 }
452
453 *obj = (void *)ske->ske_obj;
454
455 return (0);
456 }
457
458 /*
459 * Locate the passed object in the red black tree and free it.
460 */
461 static int
462 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
463 {
464 spl_kmem_emergency_t *ske;
465 int order = get_order(skc->skc_obj_size);
466
467 spin_lock(&skc->skc_lock);
468 ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
469 if (ske) {
470 rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
471 skc->skc_obj_emergency--;
472 skc->skc_obj_total--;
473 }
474 spin_unlock(&skc->skc_lock);
475
476 if (ske == NULL)
477 return (-ENOENT);
478
479 free_pages(ske->ske_obj, order);
480 kfree(ske);
481
482 return (0);
483 }
484
485 /*
486 * Release objects from the per-cpu magazine back to their slab. The flush
487 * argument contains the max number of entries to remove from the magazine.
488 */
489 static void
490 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
491 {
492 spin_lock(&skc->skc_lock);
493
494 ASSERT(skc->skc_magic == SKC_MAGIC);
495 ASSERT(skm->skm_magic == SKM_MAGIC);
496
497 int count = MIN(flush, skm->skm_avail);
498 for (int i = 0; i < count; i++)
499 spl_cache_shrink(skc, skm->skm_objs[i]);
500
501 skm->skm_avail -= count;
502 memmove(skm->skm_objs, &(skm->skm_objs[count]),
503 sizeof (void *) * skm->skm_avail);
504
505 spin_unlock(&skc->skc_lock);
506 }
507
508 /*
509 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
510 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
511 * for very small objects we may end up with more than this so as not
512 * to waste space in the minimal allocation of a single page.
513 */
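/*
 * Worked example, assuming the default spl_kmem_cache_obj_per_slab and a
 * cache whose spl_obj_size() works out to 8K: tgt_size is roughly
 * obj_per_slab * 8K plus the slab header, well under the
 * spl_kmem_cache_max_size cap, so the slab holds the full target number
 * of objects. Only when the target would exceed the cap is tgt_objs
 * scaled back to fit.
 */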
514 static int
515 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
516 {
517 uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
518
519 sks_size = spl_sks_size(skc);
520 obj_size = spl_obj_size(skc);
521 max_size = (spl_kmem_cache_max_size * 1024 * 1024);
522 tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
523
524 if (tgt_size <= max_size) {
525 tgt_objs = (tgt_size - sks_size) / obj_size;
526 } else {
527 tgt_objs = (max_size - sks_size) / obj_size;
528 tgt_size = (tgt_objs * obj_size) + sks_size;
529 }
530
531 if (tgt_objs == 0)
532 return (-ENOSPC);
533
534 *objs = tgt_objs;
535 *size = tgt_size;
536
537 return (0);
538 }
539
540 /*
541 * Make a guess at reasonable per-cpu magazine size based on the size of
542 * each object and the cost of caching N of them in each magazine. Long
543 * term this should really adapt based on an observed usage heuristic.
544 */
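/*
 * For example, assuming 4KiB pages: a 512K object lands in the second
 * bucket below and gets a 16 entry magazine, while a 512 byte object
 * gets the full 256 entries.
 */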
545 static int
546 spl_magazine_size(spl_kmem_cache_t *skc)
547 {
548 uint32_t obj_size = spl_obj_size(skc);
549 int size;
550
551 if (spl_kmem_cache_magazine_size > 0)
552 return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
553
554 /* Per-magazine sizes below assume a 4KiB page size */
555 if (obj_size > (PAGE_SIZE * 256))
556 size = 4; /* Minimum 4MiB per-magazine */
557 else if (obj_size > (PAGE_SIZE * 32))
558 size = 16; /* Minimum 2MiB per-magazine */
559 else if (obj_size > (PAGE_SIZE))
560 size = 64; /* Minimum 256KiB per-magazine */
561 else if (obj_size > (PAGE_SIZE / 4))
562 size = 128; /* Minimum 128KiB per-magazine */
563 else
564 size = 256;
565
566 return (size);
567 }
568
569 /*
570 * Allocate a per-cpu magazine to associate with a specific core.
571 */
572 static spl_kmem_magazine_t *
573 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
574 {
575 spl_kmem_magazine_t *skm;
576 int size = sizeof (spl_kmem_magazine_t) +
577 sizeof (void *) * skc->skc_mag_size;
578
579 skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
580 if (skm) {
581 skm->skm_magic = SKM_MAGIC;
582 skm->skm_avail = 0;
583 skm->skm_size = skc->skc_mag_size;
584 skm->skm_refill = skc->skc_mag_refill;
585 skm->skm_cache = skc;
586 skm->skm_cpu = cpu;
587 }
588
589 return (skm);
590 }
591
592 /*
593 * Free a per-cpu magazine associated with a specific core.
594 */
595 static void
596 spl_magazine_free(spl_kmem_magazine_t *skm)
597 {
598 ASSERT(skm->skm_magic == SKM_MAGIC);
599 ASSERT(skm->skm_avail == 0);
600 kfree(skm);
601 }
602
603 /*
604 * Create all per-cpu magazines of reasonable sizes.
605 */
606 static int
607 spl_magazine_create(spl_kmem_cache_t *skc)
608 {
609 int i = 0;
610
611 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
612
613 skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
614 num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
615 skc->skc_mag_size = spl_magazine_size(skc);
616 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
617
618 for_each_possible_cpu(i) {
619 skc->skc_mag[i] = spl_magazine_alloc(skc, i);
620 if (!skc->skc_mag[i]) {
621 for (i--; i >= 0; i--)
622 spl_magazine_free(skc->skc_mag[i]);
623
624 kfree(skc->skc_mag);
625 return (-ENOMEM);
626 }
627 }
628
629 return (0);
630 }
631
632 /*
633 * Destroy all per-cpu magazines.
634 */
635 static void
636 spl_magazine_destroy(spl_kmem_cache_t *skc)
637 {
638 spl_kmem_magazine_t *skm;
639 int i = 0;
640
641 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
642
643 for_each_possible_cpu(i) {
644 skm = skc->skc_mag[i];
645 spl_cache_flush(skc, skm, skm->skm_avail);
646 spl_magazine_free(skm);
647 }
648
649 kfree(skc->skc_mag);
650 }
651
652 /*
653 * Create an object cache based on the following arguments:
654 * name cache name
655 * size cache object size
656 * align cache object alignment
657 * ctor cache object constructor
658 * dtor cache object destructor
659 * reclaim cache object reclaim
660 * priv cache private data for ctor/dtor/reclaim
661 * vmp unused must be NULL
662 * flags
663 * KMC_KVMEM Force kvmem backed SPL cache
664 * KMC_SLAB Force Linux slab backed cache
665 * KMC_NODEBUG Disable debugging (unsupported)
666 */
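/*
 * Illustrative usage sketch (not compiled here). Callers normally reach
 * these functions through the kmem_cache_* wrappers in sys/kmem_cache.h;
 * "my_cache", my_obj_t, my_ctor, and my_dtor are hypothetical names used
 * only for illustration:
 *
 *   kmem_cache_t *cache = kmem_cache_create("my_cache",
 *       sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *   my_obj_t *obj = kmem_cache_alloc(cache, KM_SLEEP);
 *   ...
 *   kmem_cache_free(cache, obj);
 *   kmem_cache_destroy(cache);
 */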
667 spl_kmem_cache_t *
668 spl_kmem_cache_create(const char *name, size_t size, size_t align,
669 spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
670 void *priv, void *vmp, int flags)
671 {
672 gfp_t lflags = kmem_flags_convert(KM_SLEEP);
673 spl_kmem_cache_t *skc;
674 int rc;
675
676 /*
677 * Unsupported flags
678 */
679 ASSERT(vmp == NULL);
680 ASSERT(reclaim == NULL);
681
682 might_sleep();
683
684 skc = kzalloc(sizeof (*skc), lflags);
685 if (skc == NULL)
686 return (NULL);
687
688 skc->skc_magic = SKC_MAGIC;
689 skc->skc_name_size = strlen(name) + 1;
690 skc->skc_name = kmalloc(skc->skc_name_size, lflags);
691 if (skc->skc_name == NULL) {
692 kfree(skc);
693 return (NULL);
694 }
695 strlcpy(skc->skc_name, name, skc->skc_name_size);
696
697 skc->skc_ctor = ctor;
698 skc->skc_dtor = dtor;
699 skc->skc_private = priv;
700 skc->skc_vmp = vmp;
701 skc->skc_linux_cache = NULL;
702 skc->skc_flags = flags;
703 skc->skc_obj_size = size;
704 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
705 atomic_set(&skc->skc_ref, 0);
706
707 INIT_LIST_HEAD(&skc->skc_list);
708 INIT_LIST_HEAD(&skc->skc_complete_list);
709 INIT_LIST_HEAD(&skc->skc_partial_list);
710 skc->skc_emergency_tree = RB_ROOT;
711 spin_lock_init(&skc->skc_lock);
712 init_waitqueue_head(&skc->skc_waitq);
713 skc->skc_slab_fail = 0;
714 skc->skc_slab_create = 0;
715 skc->skc_slab_destroy = 0;
716 skc->skc_slab_total = 0;
717 skc->skc_slab_alloc = 0;
718 skc->skc_slab_max = 0;
719 skc->skc_obj_total = 0;
720 skc->skc_obj_alloc = 0;
721 skc->skc_obj_max = 0;
722 skc->skc_obj_deadlock = 0;
723 skc->skc_obj_emergency = 0;
724 skc->skc_obj_emergency_max = 0;
725
726 rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
727 GFP_KERNEL);
728 if (rc != 0) {
729 kfree(skc);
730 return (NULL);
731 }
732
733 /*
734 * Verify the requested alignment restriction is sane.
735 */
736 if (align) {
737 VERIFY(ISP2(align));
738 VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
739 VERIFY3U(align, <=, PAGE_SIZE);
740 skc->skc_obj_align = align;
741 }
742
743 /*
744 * When no specific type of slab is requested (kmem, vmem, or
745 * linuxslab) then select a cache type based on the object size
746 * and default tunables.
747 */
748 if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
749 if (spl_kmem_cache_slab_limit &&
750 size <= (size_t)spl_kmem_cache_slab_limit) {
751 /*
752 * Objects smaller than spl_kmem_cache_slab_limit can
753 * use the Linux slab for better space-efficiency.
754 */
755 skc->skc_flags |= KMC_SLAB;
756 } else {
757 /*
758 * All other objects are considered large and are
759 * placed on kvmem backed slabs.
760 */
761 skc->skc_flags |= KMC_KVMEM;
762 }
763 }
764
765 /*
766 * Given the type of slab allocate the required resources.
767 */
768 if (skc->skc_flags & KMC_KVMEM) {
769 rc = spl_slab_size(skc,
770 &skc->skc_slab_objs, &skc->skc_slab_size);
771 if (rc)
772 goto out;
773
774 rc = spl_magazine_create(skc);
775 if (rc)
776 goto out;
777 } else {
778 unsigned long slabflags = 0;
779
780 if (size > spl_kmem_cache_slab_limit)
781 goto out;
782
783 #if defined(SLAB_USERCOPY)
784 /*
785 * Required for PAX-enabled kernels if the slab is to be
786 * used for copying between user and kernel space.
787 */
788 slabflags |= SLAB_USERCOPY;
789 #endif
790
791 #if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
792 /*
793 * Newer grsec patchset uses kmem_cache_create_usercopy()
794 * instead of SLAB_USERCOPY flag
795 */
796 skc->skc_linux_cache = kmem_cache_create_usercopy(
797 skc->skc_name, size, align, slabflags, 0, size, NULL);
798 #else
799 skc->skc_linux_cache = kmem_cache_create(
800 skc->skc_name, size, align, slabflags, NULL);
801 #endif
802 if (skc->skc_linux_cache == NULL)
803 goto out;
804 }
805
806 down_write(&spl_kmem_cache_sem);
807 list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
808 up_write(&spl_kmem_cache_sem);
809
810 return (skc);
811 out:
812 kfree(skc->skc_name);
813 percpu_counter_destroy(&skc->skc_linux_alloc);
814 kfree(skc);
815 return (NULL);
816 }
817 EXPORT_SYMBOL(spl_kmem_cache_create);
818
819 /*
820 * Register a move callback for cache defragmentation.
821 * XXX: Unimplemented but harmless to stub out for now.
822 */
823 void
824 spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
825 kmem_cbrc_t (move)(void *, void *, size_t, void *))
826 {
827 ASSERT(move != NULL);
828 }
829 EXPORT_SYMBOL(spl_kmem_cache_set_move);
830
831 /*
832 * Destroy a cache and all objects associated with the cache.
833 */
834 void
835 spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
836 {
837 DECLARE_WAIT_QUEUE_HEAD(wq);
838 taskqid_t id;
839
840 ASSERT(skc->skc_magic == SKC_MAGIC);
841 ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
842
843 down_write(&spl_kmem_cache_sem);
844 list_del_init(&skc->skc_list);
845 up_write(&spl_kmem_cache_sem);
846
847 /* Cancel and wait for any pending delayed tasks */
848 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
849
850 spin_lock(&skc->skc_lock);
851 id = skc->skc_taskqid;
852 spin_unlock(&skc->skc_lock);
853
854 taskq_cancel_id(spl_kmem_cache_taskq, id);
855
856 /*
857 * Wait until all current callers complete; this is mainly
858 * to catch the case where a low memory situation triggers a
859 * cache reaping action which races with this destroy.
860 */
861 wait_event(wq, atomic_read(&skc->skc_ref) == 0);
862
863 if (skc->skc_flags & KMC_KVMEM) {
864 spl_magazine_destroy(skc);
865 spl_slab_reclaim(skc);
866 } else {
867 ASSERT(skc->skc_flags & KMC_SLAB);
868 kmem_cache_destroy(skc->skc_linux_cache);
869 }
870
871 spin_lock(&skc->skc_lock);
872
873 /*
874 * Validate there are no objects in use and free all the
875 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
876 */
877 ASSERT3U(skc->skc_slab_alloc, ==, 0);
878 ASSERT3U(skc->skc_obj_alloc, ==, 0);
879 ASSERT3U(skc->skc_slab_total, ==, 0);
880 ASSERT3U(skc->skc_obj_total, ==, 0);
881 ASSERT3U(skc->skc_obj_emergency, ==, 0);
882 ASSERT(list_empty(&skc->skc_complete_list));
883
884 ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
885 percpu_counter_destroy(&skc->skc_linux_alloc);
886
887 spin_unlock(&skc->skc_lock);
888
889 kfree(skc->skc_name);
890 kfree(skc);
891 }
892 EXPORT_SYMBOL(spl_kmem_cache_destroy);
893
894 /*
895 * Allocate an object from a slab attached to the cache. This is used to
896 * repopulate the per-cpu magazine caches in batches when they run low.
897 */
898 static void *
899 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
900 {
901 spl_kmem_obj_t *sko;
902
903 ASSERT(skc->skc_magic == SKC_MAGIC);
904 ASSERT(sks->sks_magic == SKS_MAGIC);
905
906 sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
907 ASSERT(sko->sko_magic == SKO_MAGIC);
908 ASSERT(sko->sko_addr != NULL);
909
910 /* Remove from sks_free_list */
911 list_del_init(&sko->sko_list);
912
913 sks->sks_age = jiffies;
914 sks->sks_ref++;
915 skc->skc_obj_alloc++;
916
917 /* Track max obj usage statistics */
918 if (skc->skc_obj_alloc > skc->skc_obj_max)
919 skc->skc_obj_max = skc->skc_obj_alloc;
920
921 /* Track max slab usage statistics */
922 if (sks->sks_ref == 1) {
923 skc->skc_slab_alloc++;
924
925 if (skc->skc_slab_alloc > skc->skc_slab_max)
926 skc->skc_slab_max = skc->skc_slab_alloc;
927 }
928
929 return (sko->sko_addr);
930 }
931
932 /*
933 * Generic slab allocation function to be run by the global work queues.
934 * It is responsible for allocating a new slab, linking it in to the list
935 * of partial slabs, and then waking any waiters.
936 */
937 static int
938 __spl_cache_grow(spl_kmem_cache_t *skc, int flags)
939 {
940 spl_kmem_slab_t *sks;
941
942 fstrans_cookie_t cookie = spl_fstrans_mark();
943 sks = spl_slab_alloc(skc, flags);
944 spl_fstrans_unmark(cookie);
945
946 spin_lock(&skc->skc_lock);
947 if (sks) {
948 skc->skc_slab_total++;
949 skc->skc_obj_total += sks->sks_objs;
950 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
951
952 smp_mb__before_atomic();
953 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
954 smp_mb__after_atomic();
955 }
956 spin_unlock(&skc->skc_lock);
957
958 return (sks == NULL ? -ENOMEM : 0);
959 }
960
961 static void
962 spl_cache_grow_work(void *data)
963 {
964 spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
965 spl_kmem_cache_t *skc = ska->ska_cache;
966
967 int error = __spl_cache_grow(skc, ska->ska_flags);
968
969 atomic_dec(&skc->skc_ref);
970 smp_mb__before_atomic();
971 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
972 smp_mb__after_atomic();
973 if (error == 0)
974 wake_up_all(&skc->skc_waitq);
975
976 kfree(ska);
977 }
978
979 /*
980 * Returns non-zero when a new slab should be available.
981 */
982 static int
983 spl_cache_grow_wait(spl_kmem_cache_t *skc)
984 {
985 return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
986 }
987
988 /*
989 * No available objects on any slabs, create a new slab. Note that this
990 * functionality is disabled for KMC_SLAB caches which are backed by the
991 * Linux slab.
992 */
993 static int
994 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
995 {
996 int remaining, rc = 0;
997
998 ASSERT0(flags & ~KM_PUBLIC_MASK);
999 ASSERT(skc->skc_magic == SKC_MAGIC);
1000 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
1001
1002 *obj = NULL;
1003
1004 /*
1005 * Since we can't sleep, attempt an emergency allocation to satisfy
1006 * the request. The only alternative is to fail the allocation, but
1007 * it's preferable to try. The use of KM_NOSLEEP is expected to be rare.
1008 */
1009 if (flags & KM_NOSLEEP)
1010 return (spl_emergency_alloc(skc, flags, obj));
1011
1012 might_sleep();
1013
1014 /*
1015 * Before allocating a new slab wait for any reaping to complete and
1016 * then return so the local magazine can be rechecked for new objects.
1017 */
1018 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1019 rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
1020 TASK_UNINTERRUPTIBLE);
1021 return (rc ? rc : -EAGAIN);
1022 }
1023
1024 /*
1025 * Note: It would be nice to reduce the overhead of context switch
1026 * and improve NUMA locality, by trying to allocate a new slab in the
1027 * current process context with KM_NOSLEEP flag.
1028 *
1029 * However, this can't be applied to vmem/kvmem due to a bug that
1030 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
1031 */
1032
1033 /*
1034 * This is handled by dispatching a work request to the global work
1035 * queue. This allows us to asynchronously allocate a new slab while
1036 * retaining the ability to safely fall back to smaller synchronous
1037 * allocations to ensure forward progress is always maintained.
1038 */
1039 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
1040 spl_kmem_alloc_t *ska;
1041
1042 ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
1043 if (ska == NULL) {
1044 clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
1045 smp_mb__after_atomic();
1046 wake_up_all(&skc->skc_waitq);
1047 return (-ENOMEM);
1048 }
1049
1050 atomic_inc(&skc->skc_ref);
1051 ska->ska_cache = skc;
1052 ska->ska_flags = flags;
1053 taskq_init_ent(&ska->ska_tqe);
1054 taskq_dispatch_ent(spl_kmem_cache_taskq,
1055 spl_cache_grow_work, ska, 0, &ska->ska_tqe);
1056 }
1057
1058 /*
1059 * The goal here is to only detect the rare case where a virtual slab
1060 * allocation has deadlocked. We must be careful to minimize the use
1061 * of emergency objects which are more expensive to track. Therefore,
1062 * we set a very long timeout for the asynchronous allocation and if
1063 * the timeout is reached the cache is flagged as deadlocked. From
1064 * this point only new emergency objects will be allocated until the
1065 * asynchronous allocation completes and clears the deadlocked flag.
1066 */
1067 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
1068 rc = spl_emergency_alloc(skc, flags, obj);
1069 } else {
1070 remaining = wait_event_timeout(skc->skc_waitq,
1071 spl_cache_grow_wait(skc), HZ / 10);
1072
1073 if (!remaining) {
1074 spin_lock(&skc->skc_lock);
1075 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
1076 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1077 skc->skc_obj_deadlock++;
1078 }
1079 spin_unlock(&skc->skc_lock);
1080 }
1081
1082 rc = -ENOMEM;
1083 }
1084
1085 return (rc);
1086 }
1087
1088 /*
1089 * Refill a per-cpu magazine with objects from the slabs for this cache.
1090 * Ideally the magazine can be repopulated using existing objects which have
1091 * been released; however, if we are unable to locate enough free objects new
1092 * slabs of objects will be created. On success NULL is returned, otherwise
1093 * the address of a single emergency object is returned for use by the caller.
1094 */
1095 static void *
1096 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
1097 {
1098 spl_kmem_slab_t *sks;
1099 int count = 0, rc, refill;
1100 void *obj = NULL;
1101
1102 ASSERT(skc->skc_magic == SKC_MAGIC);
1103 ASSERT(skm->skm_magic == SKM_MAGIC);
1104
1105 refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
1106 spin_lock(&skc->skc_lock);
1107
1108 while (refill > 0) {
1109 /* No slabs available we may need to grow the cache */
1110 if (list_empty(&skc->skc_partial_list)) {
1111 spin_unlock(&skc->skc_lock);
1112
1113 local_irq_enable();
1114 rc = spl_cache_grow(skc, flags, &obj);
1115 local_irq_disable();
1116
1117 /* Emergency object for immediate use by caller */
1118 if (rc == 0 && obj != NULL)
1119 return (obj);
1120
1121 if (rc)
1122 goto out;
1123
1124 /* Rescheduled to a different CPU, skm is not local */
1125 if (skm != skc->skc_mag[smp_processor_id()])
1126 goto out;
1127
1128 /*
1129 * Potentially rescheduled to the same CPU but
1130 * allocations may have occurred from this CPU while
1131 * we were sleeping so recalculate max refill.
1132 */
1133 refill = MIN(refill, skm->skm_size - skm->skm_avail);
1134
1135 spin_lock(&skc->skc_lock);
1136 continue;
1137 }
1138
1139 /* Grab the next available slab */
1140 sks = list_entry((&skc->skc_partial_list)->next,
1141 spl_kmem_slab_t, sks_list);
1142 ASSERT(sks->sks_magic == SKS_MAGIC);
1143 ASSERT(sks->sks_ref < sks->sks_objs);
1144 ASSERT(!list_empty(&sks->sks_free_list));
1145
1146 /*
1147 * Consume as many objects as needed to refill the requested
1148 * cache. We must also be careful not to overfill it.
1149 */
1150 while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
1151 ++count) {
1152 ASSERT(skm->skm_avail < skm->skm_size);
1153 ASSERT(count < skm->skm_size);
1154 skm->skm_objs[skm->skm_avail++] =
1155 spl_cache_obj(skc, sks);
1156 }
1157
1158 /* Move slab to skc_complete_list when full */
1159 if (sks->sks_ref == sks->sks_objs) {
1160 list_del(&sks->sks_list);
1161 list_add(&sks->sks_list, &skc->skc_complete_list);
1162 }
1163 }
1164
1165 spin_unlock(&skc->skc_lock);
1166 out:
1167 return (NULL);
1168 }
1169
1170 /*
1171 * Release an object back to the slab from which it came.
1172 */
1173 static void
1174 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
1175 {
1176 spl_kmem_slab_t *sks = NULL;
1177 spl_kmem_obj_t *sko = NULL;
1178
1179 ASSERT(skc->skc_magic == SKC_MAGIC);
1180
1181 sko = spl_sko_from_obj(skc, obj);
1182 ASSERT(sko->sko_magic == SKO_MAGIC);
1183 sks = sko->sko_slab;
1184 ASSERT(sks->sks_magic == SKS_MAGIC);
1185 ASSERT(sks->sks_cache == skc);
1186 list_add(&sko->sko_list, &sks->sks_free_list);
1187
1188 sks->sks_age = jiffies;
1189 sks->sks_ref--;
1190 skc->skc_obj_alloc--;
1191
1192 /*
1193 * Move slab to skc_partial_list when no longer full. Slabs
1194 * are added to the head to keep the partial list in quasi-full
1195 * sorted order. Fuller at the head, emptier at the tail.
1196 */
1197 if (sks->sks_ref == (sks->sks_objs - 1)) {
1198 list_del(&sks->sks_list);
1199 list_add(&sks->sks_list, &skc->skc_partial_list);
1200 }
1201
1202 /*
1203 * Move empty slabs to the end of the partial list so
1204 * they can be easily found and freed during reclamation.
1205 */
1206 if (sks->sks_ref == 0) {
1207 list_del(&sks->sks_list);
1208 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1209 skc->skc_slab_alloc--;
1210 }
1211 }
1212
1213 /*
1214 * Allocate an object from the per-cpu magazine, or if the magazine
1215 * is empty directly allocate from a slab and repopulate the magazine.
1216 */
1217 void *
1218 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
1219 {
1220 spl_kmem_magazine_t *skm;
1221 void *obj = NULL;
1222
1223 ASSERT0(flags & ~KM_PUBLIC_MASK);
1224 ASSERT(skc->skc_magic == SKC_MAGIC);
1225 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1226
1227 /*
1228 * Allocate directly from a Linux slab. All optimizations are left
1229 * to the underlying cache; we only need to guarantee that KM_SLEEP
1230 * callers will never fail.
1231 */
1232 if (skc->skc_flags & KMC_SLAB) {
1233 struct kmem_cache *slc = skc->skc_linux_cache;
1234 do {
1235 obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
1236 } while ((obj == NULL) && !(flags & KM_NOSLEEP));
1237
1238 if (obj != NULL) {
1239 /*
1240 * Even though we leave everything up to the
1241 * underlying cache we still keep track of
1242 * how many objects we've allocated in it for
1243 * better debuggability.
1244 */
1245 percpu_counter_inc(&skc->skc_linux_alloc);
1246 }
1247 goto ret;
1248 }
1249
1250 local_irq_disable();
1251
1252 restart:
1253 /*
1254 * Safe to update the per-cpu structure without a lock, but
1255 * in the restart case we must be careful to reacquire
1256 * the local magazine since this may have changed
1257 * when we need to grow the cache.
1258 */
1259 skm = skc->skc_mag[smp_processor_id()];
1260 ASSERT(skm->skm_magic == SKM_MAGIC);
1261
1262 if (likely(skm->skm_avail)) {
1263 /* Object available in CPU cache, use it */
1264 obj = skm->skm_objs[--skm->skm_avail];
1265 } else {
1266 obj = spl_cache_refill(skc, skm, flags);
1267 if ((obj == NULL) && !(flags & KM_NOSLEEP))
1268 goto restart;
1269
1270 local_irq_enable();
1271 goto ret;
1272 }
1273
1274 local_irq_enable();
1275 ASSERT(obj);
1276 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
1277
1278 ret:
1279 /* Pre-emptively migrate object to CPU L1 cache */
1280 if (obj) {
1281 if (obj && skc->skc_ctor)
1282 skc->skc_ctor(obj, skc->skc_private, flags);
1283 else
1284 prefetchw(obj);
1285 }
1286
1287 return (obj);
1288 }
1289 EXPORT_SYMBOL(spl_kmem_cache_alloc);
1290
1291 /*
1292 * Free an object back to the local per-cpu magazine; there is no
1293 * guarantee that this is the same magazine the object was originally
1294 * allocated from. We may need to flush entries from the magazine
1295 * back to the slabs to make space.
1296 */
1297 void
1298 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
1299 {
1300 spl_kmem_magazine_t *skm;
1301 unsigned long flags;
1302 int do_reclaim = 0;
1303 int do_emergency = 0;
1304
1305 ASSERT(skc->skc_magic == SKC_MAGIC);
1306 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1307
1308 /*
1309 * Run the destructor
1310 */
1311 if (skc->skc_dtor)
1312 skc->skc_dtor(obj, skc->skc_private);
1313
1314 /*
1315 * Free the object back to the underlying Linux slab.
1316 */
1317 if (skc->skc_flags & KMC_SLAB) {
1318 kmem_cache_free(skc->skc_linux_cache, obj);
1319 percpu_counter_dec(&skc->skc_linux_alloc);
1320 return;
1321 }
1322
1323 /*
1324 * While a cache has outstanding emergency objects all freed objects
1325 * must be checked. However, since emergency objects will never use
1326 * a virtual address these objects can be safely excluded as an
1327 * optimization.
1328 */
1329 if (!is_vmalloc_addr(obj)) {
1330 spin_lock(&skc->skc_lock);
1331 do_emergency = (skc->skc_obj_emergency > 0);
1332 spin_unlock(&skc->skc_lock);
1333
1334 if (do_emergency && (spl_emergency_free(skc, obj) == 0))
1335 return;
1336 }
1337
1338 local_irq_save(flags);
1339
1340 /*
1341 * Safe to update the per-cpu structure without a lock, but
1342 * since no remote memory allocation tracking is performed
1343 * it is entirely possible to allocate an object from one
1344 * CPU cache and return it to another.
1345 */
1346 skm = skc->skc_mag[smp_processor_id()];
1347 ASSERT(skm->skm_magic == SKM_MAGIC);
1348
1349 /*
1350 * Per-CPU cache full, flush it to make space for this object;
1351 * this may result in an empty slab which can be reclaimed once
1352 * interrupts are re-enabled.
1353 */
1354 if (unlikely(skm->skm_avail >= skm->skm_size)) {
1355 spl_cache_flush(skc, skm, skm->skm_refill);
1356 do_reclaim = 1;
1357 }
1358
1359 /* Available space in cache, use it */
1360 skm->skm_objs[skm->skm_avail++] = obj;
1361
1362 local_irq_restore(flags);
1363
1364 if (do_reclaim)
1365 spl_slab_reclaim(skc);
1366 }
1367 EXPORT_SYMBOL(spl_kmem_cache_free);
1368
1369 /*
1370 * Depending on how many and which objects are released it may simply
1371 * repopulate the local magazine which will then need to age-out. Objects
1372 * which cannot fit in the magazine will be released back to their slabs
1373 * which will also need to age out before being released. This is all just
1374 * best effort and we do not want to thrash creating and destroying slabs.
1375 */
1376 void
1377 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
1378 {
1379 ASSERT(skc->skc_magic == SKC_MAGIC);
1380 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1381
1382 if (skc->skc_flags & KMC_SLAB)
1383 return;
1384
1385 atomic_inc(&skc->skc_ref);
1386
1387 /*
1388 * Prevent concurrent cache reaping when contended.
1389 */
1390 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
1391 goto out;
1392
1393 /* Reclaim from the magazine and free all now empty slabs. */
1394 unsigned long irq_flags;
1395 local_irq_save(irq_flags);
1396 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
1397 spl_cache_flush(skc, skm, skm->skm_avail);
1398 local_irq_restore(irq_flags);
1399
1400 spl_slab_reclaim(skc);
1401 clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
1402 smp_mb__after_atomic();
1403 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
1404 out:
1405 atomic_dec(&skc->skc_ref);
1406 }
1407 EXPORT_SYMBOL(spl_kmem_cache_reap_now);
1408
1409 /*
1410 * This is stubbed out for code consistency with other platforms. There
1411 * is existing logic to prevent concurrent reaping so while this is ugly
1412 * it should do no harm.
1413 */
1414 int
1415 spl_kmem_cache_reap_active(void)
1416 {
1417 return (0);
1418 }
1419 EXPORT_SYMBOL(spl_kmem_cache_reap_active);
1420
1421 /*
1422 * Reap all free slabs from all registered caches.
1423 */
1424 void
1425 spl_kmem_reap(void)
1426 {
1427 spl_kmem_cache_t *skc = NULL;
1428
1429 down_read(&spl_kmem_cache_sem);
1430 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
1431 spl_kmem_cache_reap_now(skc);
1432 }
1433 up_read(&spl_kmem_cache_sem);
1434 }
1435 EXPORT_SYMBOL(spl_kmem_reap);
1436
1437 int
1438 spl_kmem_cache_init(void)
1439 {
1440 init_rwsem(&spl_kmem_cache_sem);
1441 INIT_LIST_HEAD(&spl_kmem_cache_list);
1442 spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
1443 spl_kmem_cache_kmem_threads, maxclsyspri,
1444 spl_kmem_cache_kmem_threads * 8, INT_MAX,
1445 TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
1446
1447 if (spl_kmem_cache_taskq == NULL)
1448 return (-ENOMEM);
1449
1450 return (0);
1451 }
1452
1453 void
1454 spl_kmem_cache_fini(void)
1455 {
1456 taskq_destroy(spl_kmem_cache_taskq);
1457 }
1458