/linux/mm/kasan/

common.c
    464  poison_kmalloc_redzone(slab->slab_cache, object, size, flags);   in __kasan_krealloc()
    519  if (check_slab_allocation(slab->slab_cache, ptr, ip))   in __kasan_mempool_poison_object()
    522  poison_slab_object(slab->slab_cache, ptr, false, false);   in __kasan_mempool_poison_object()
    547  unpoison_slab_object(slab->slab_cache, ptr, flags, false);   in __kasan_mempool_unpoison_object()
    550  if (is_kmalloc_cache(slab->slab_cache))   in __kasan_mempool_unpoison_object()
    551  poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);   in __kasan_mempool_unpoison_object()
quarantine.c
    131  return virt_to_slab(qlink)->slab_cache;   in qlink_to_cache()
generic.c
    534  cache = slab->slab_cache;   in __kasan_record_aux_stack()
report.c
    508  info->cache = slab->slab_cache;   in complete_report_info()
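All four KASAN files above recover the owning cache the same way: map the object's address to its struct slab and read slab->slab_cache (qlink_to_cache() in quarantine.c is the pattern in its purest form). A minimal sketch of that lookup, assuming the mm-internal virt_to_slab() helper and struct slab from mm/slab.h; cache_of_object() is a made-up name:

/*
 * Resolve the kmem_cache an object was allocated from, KASAN-style.
 * Only builds inside mm/, where struct slab and virt_to_slab() are visible.
 */
static struct kmem_cache *cache_of_object(const void *object)
{
	struct slab *slab = virt_to_slab(object);

	/* Large kmalloc buffers and non-slab memory have no owning cache. */
	if (!slab)
		return NULL;

	return slab->slab_cache;
}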
/linux/drivers/gpu/drm/i915/

i915_active.c
    24    static struct kmem_cache *slab_cache;   variable
    172   kmem_cache_free(slab_cache, it);   in __active_retire()
    320   node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);   in active_instance()
    766   kmem_cache_free(slab_cache, ref->cache);   in i915_active_fini()
    886   node = kmem_cache_alloc(slab_cache, GFP_KERNEL);   in i915_active_acquire_preallocate_barrier()
    934   kmem_cache_free(slab_cache, node);   in i915_active_acquire_preallocate_barrier()
    1198  kmem_cache_destroy(slab_cache);   in i915_active_module_exit()
    1203  slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);   in i915_active_module_init()
    1204  if (!slab_cache)   in i915_active_module_init()
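The i915_active.c hits show the common driver pattern of one file-scoped cache that lives for the lifetime of the module: KMEM_CACHE() at module init, kmem_cache_alloc()/kmem_cache_free() for individual nodes, kmem_cache_destroy() at module exit. A condensed sketch of that lifecycle, with struct example_node and the function names invented for illustration:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_node {
	struct list_head link;
	u64 timeline;
};

static struct kmem_cache *slab_cache;

static struct example_node *example_node_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(slab_cache, gfp);
}

static void example_node_free(struct example_node *node)
{
	kmem_cache_free(slab_cache, node);
}

static int __init example_module_init(void)
{
	/* Name, size and alignment are all derived from struct example_node. */
	slab_cache = KMEM_CACHE(example_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;
	return 0;
}

static void __exit example_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}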
/linux/Documentation/translations/zh_CN/mm/

split_page_table_lock.rst
    62   Make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache for its pages.
/linux/mm/

slab.h
    55   struct kmem_cache *slab_cache;   member
    98   SLAB_MATCH(compound_head, slab_cache);   /* Ensure bit 0 is clear */
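The SLAB_MATCH() line at slab.h:98 exists because struct slab is overlaid on struct page: slab_cache must sit at the same offset as page->compound_head, and since a kmem_cache pointer is always word-aligned its bottom bit stays clear, so a slab page is never mistaken for a compound tail page. A sketch of what that assertion boils down to (the real macro lives in mm/slab.h, and the check only compiles where struct slab is visible):

/* Field overlay check: slab_cache must shadow page->compound_head. */
static_assert(offsetof(struct page, compound_head) ==
	      offsetof(struct slab, slab_cache));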
slub.c
    1630  } else if (!slab->slab_cache) {   in free_consistency_checks()
    2202  s = slab->slab_cache;   in memcg_slab_post_charge()
    2598  slab->slab_cache = s;   in allocate_slab()
    2655  __free_slab(slab->slab_cache, slab);   in rcu_free_slab()
    4624  s = slab->slab_cache;   in slab_free_after_rcu_debug()
    4648  return slab->slab_cache;   in virt_to_cache()
    4726  s = slab->slab_cache;   in kfree()
    4771  df->s = df->slab->slab_cache;   in build_detached_freelist()
    5624  s = slab->slab_cache;   in __check_heap_object()
    5879  p->slab_cache = s;   in bootstrap()
    [all …]
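virt_to_cache() and kfree() above are the read side of the field that allocate_slab() writes: every freshly allocated slab page is stamped with its owning cache, and a later free only has the object pointer to go on. A simplified sketch of the kfree() shape those hits suggest, assuming the mm-internal folio_slab() and free_large_kmalloc() helpers; KASAN hooks, tracing and the fast-path details are omitted:

void example_kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		/* Large kmalloc allocations bypass the slab caches entirely. */
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	kmem_cache_free(slab->slab_cache, (void *)object);
}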
slab_common.c
    997   skip_orig_size_check(folio_slab(folio)->slab_cache, object);   in __ksize()
    1000  return slab_ksize(folio_slab(folio)->slab_cache);   in __ksize()
memcontrol.c
    2423  off = obj_to_index(slab->slab_cache, slab, p);   in mem_cgroup_from_obj_folio()
/linux/tools/cgroup/

memcg_slabinfo.py
    198   cache = slab.slab_cache
/linux/drivers/md/

dm-bufio.c
    991   struct kmem_cache *slab_cache;   member
    1179  if (unlikely(c->slab_cache != NULL)) {   in alloc_buffer_data()
    1181  return kmem_cache_alloc(c->slab_cache, gfp_mask);   in alloc_buffer_data()
    1204  kmem_cache_free(c->slab_cache, data);   in free_buffer_data()
    2526  c->slab_cache = kmem_cache_create(slab_name, block_size, align,   in dm_bufio_client_create()
    2528  if (!c->slab_cache) {   in dm_bufio_client_create()
    2587  kmem_cache_destroy(c->slab_cache);   in dm_bufio_client_create()
    2638  kmem_cache_destroy(c->slab_cache);   in dm_bufio_client_destroy()
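dm-bufio keeps the cache per client rather than per module: dm_bufio_client_create() builds a kmem_cache sized to the client's block size, and the buffer-data helpers fall back to another allocator when no cache was set up. A rough sketch of that shape, with struct example_client and the page-allocator fallback chosen here purely for illustration (the real code also supports vmalloc and has more branches):

struct example_client {
	unsigned int block_size;
	struct kmem_cache *slab_cache;	/* NULL if the block size has no cache */
};

static void *example_alloc_buffer_data(struct example_client *c, gfp_t gfp_mask)
{
	if (c->slab_cache)
		return kmem_cache_alloc(c->slab_cache, gfp_mask);

	return (void *)__get_free_pages(gfp_mask, get_order(c->block_size));
}

static void example_free_buffer_data(struct example_client *c, void *data)
{
	if (c->slab_cache)
		kmem_cache_free(c->slab_cache, data);
	else
		free_pages((unsigned long)data, get_order(c->block_size));
}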
raid5.h
    637   struct kmem_cache *slab_cache;   /* for allocating stripes */   member
raid5.c
    2393  sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);   in grow_one_stripe()
    2399  free_stripe(conf->slab_cache, sh);   in grow_one_stripe()
    2432  conf->slab_cache = sc;   in grow_stripes()
    2605  free_stripe(conf->slab_cache, osh);   in resize_stripes()
    2613  kmem_cache_destroy(conf->slab_cache);   in resize_stripes()
    2643  conf->slab_cache = sc;   in resize_stripes()
    2701  free_stripe(conf->slab_cache, sh);   in drop_one_stripe()
    2713  kmem_cache_destroy(conf->slab_cache);   in shrink_stripes()
    2714  conf->slab_cache = NULL;   in shrink_stripes()
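raid5.c goes one step further than a fixed cache: resize_stripes() swaps conf->slab_cache for a new cache when the per-stripe object size changes, migrating stripes before destroying the old one, and shrink_stripes() tears the cache down once the last stripe is gone. A condensed sketch of that replace-the-cache idea, with struct example_conf and the cache name invented for illustration; the real code also quiesces the array and copies per-device state:

struct example_conf {
	struct kmem_cache *slab_cache;
};

static int example_resize_cache(struct example_conf *conf, size_t new_obj_size)
{
	struct kmem_cache *sc;

	sc = kmem_cache_create("example_stripe", new_obj_size, 0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	/* ... allocate replacement objects from sc, free the old ones ... */

	kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = sc;
	return 0;
}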
/linux/Documentation/mm/

split_page_table_lock.rst
    65   allocation: slab uses page->slab_cache for its pages.
/linux/mm/kfence/

core.c
    488   slab->slab_cache = cache;   in kfence_guarded_alloc()
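KFENCE serves guarded objects from its own pool pages, but kfence_guarded_alloc() still records the requesting cache in slab->slab_cache, so generic paths such as kfree() and virt_to_cache() resolve the right cache for a KFENCE-backed object. A tiny illustrative fragment of that idea (not KFENCE's actual code):

static void example_adopt_into_cache(struct slab *slab, struct kmem_cache *cache)
{
	/*
	 * From here on, anything that walks slab->slab_cache treats this
	 * page as belonging to @cache, even though its memory came from
	 * the KFENCE pool rather than the slab allocator.
	 */
	slab->slab_cache = cache;
}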