/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of this file the kmem_cache_* definitions are
 * removed to allow access to the real Linux slab allocator.
 */
#undef	kmem_cache_destroy
#undef	kmem_cache_create
#undef	kmem_cache_alloc
#undef	kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant.  This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/* BEGIN CSTYLED */
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock.  This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released, this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per CPU).  Magazines
 * may never be entirely disabled in this implementation.
 */
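
/*
 * For example, to cap magazines at 64 objects per CPU the tunable below
 * can be set when the module is loaded (the 0444 permissions make it
 * read-only at runtime, so it cannot be changed through sysfs).  An
 * illustrative invocation, assuming the module is loaded by hand:
 *
 *   modprobe spl spl_kmem_cache_magazine_size=64
 */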
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit =
	SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly
 *    destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;		/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;		/* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;		/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef	HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}
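
/*
 * A worked example of the size math above (the sizeof value is assumed
 * for illustration, it varies by build): with skc_obj_size = 24,
 * skc_obj_align = 8 and sizeof (spl_kmem_obj_t) = 32, spl_obj_size()
 * returns P2ROUNDUP(24, 8) + P2ROUNDUP(32, 8) = 24 + 32 = 56 bytes,
 * and spl_sko_from_obj() locates the spl_kmem_obj_t at obj + 24.
 */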

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects in to one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

/*
 * Remove a slab from the complete or partial list.  This must be called
 * with 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects in to the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

/*
 * Allocate a single emergency object and track it in a red-black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance: use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red-black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}
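
/*
 * Illustrative flow of the two helpers above (nothing in this file
 * calls them this way directly): a KM_NOSLEEP request that finds no
 * partial slab is satisfied by spl_emergency_alloc(), which records
 * the object by address in the red-black tree; a later free locates it
 * in the tree and returns the pages straight to the system.
 *
 *   void *obj = NULL;
 *   if (spl_emergency_alloc(skc, KM_NOSLEEP, &obj) == 0) {
 *           ... use obj ...
 *           VERIFY0(spl_emergency_free(skc, obj));
 *   }
 */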

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}

/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return (size);
}
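
/*
 * A worked example of the heuristic above (4KiB pages assumed): an
 * object of 16KiB is larger than PAGE_SIZE but not larger than
 * PAGE_SIZE * 32, so each magazine holds up to 64 objects, or at most
 * 64 * 16KiB = 1MiB of cached memory per CPU for that cache.
 */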

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > spl_kmem_cache_slab_limit)
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of the SLAB_USERCOPY flag.
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
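
/*
 * A minimal usage sketch of the API above.  The cache name and object
 * type are hypothetical and error handling is elided; flags of 0 let
 * the object size and tunables pick the backing slab type.
 *
 *   spl_kmem_cache_t *skc = spl_kmem_cache_create("example_cache",
 *       sizeof (my_obj_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *
 *   void *obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
 *   ... use obj ...
 *   spl_kmem_cache_free(skc, obj);
 *   spl_kmem_cache_destroy(skc);
 */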

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete; this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep, attempt an emergency allocation to satisfy
	 * the request.  The only alternative is to fail the allocation, but
	 * it's preferable to try.  The use of KM_NOSLEEP is expected to
	 * be rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of a context switch
	 * and improve NUMA locality, by trying to allocate a new slab in the
	 * current process context with the KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}
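
/*
 * For callers the practical consequence of the above is that KM_NOSLEEP
 * allocations may fail and must be checked, while KM_SLEEP allocations
 * block until they can succeed.  An illustrative caller-side pattern:
 *
 *   void *obj = spl_kmem_cache_alloc(skc, KM_NOSLEEP);
 *   if (obj == NULL) {
 *           ... defer the work or fall back to a preallocated object ...
 *   }
 */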

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
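
/*
 * A sketch of a constructor/destructor pair as consumed by the alloc
 * path above and the free path below.  The type and member names are
 * hypothetical; a constructor runs on every allocation and the
 * matching destructor on every free, per the Solaris semantics this
 * layer emulates.
 *
 *   static int
 *   my_obj_ctor(void *buf, void *priv, int kmflags)
 *   {
 *           my_obj_t *op = buf;
 *           mutex_init(&op->mo_lock, NULL, MUTEX_DEFAULT, NULL);
 *           return (0);
 *   }
 *
 *   static void
 *   my_obj_dtor(void *buf, void *priv)
 *   {
 *           my_obj_t *op = buf;
 *           mutex_destroy(&op->mo_lock);
 *   }
 */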

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entire magazines back to the
 * slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache and
	 * return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object;
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released, it may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);
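
/*
 * Reaping is driven externally, typically from a shrinker or memory
 * pressure callback rather than from this file.  An illustration of
 * how a caller might reap a single cache or every registered cache:
 *
 *   spl_kmem_cache_reap_now(skc);	// one cache
 *   spl_kmem_reap();			// all caches, defined below
 */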

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}