/freebsd/contrib/jemalloc/src/

arena.c
    1039  return slab;  in arena_bin_slabs_nonfull_tryget()
    1066  extent_t *slab;  in arena_bin_reset() local
    1209  extent_t *slab;  in arena_slab_alloc_hard() local
    1225  return slab;  in arena_slab_alloc_hard()
    1262  return slab;  in arena_slab_alloc()
    1268  extent_t *slab;  in arena_bin_nonfull_slab_get() local
    1274  return slab;  in arena_bin_nonfull_slab_get()
    1291  return slab;  in arena_bin_nonfull_slab_get()
    1301  return slab;  in arena_bin_nonfull_slab_get()
    1312  extent_t *slab;  in arena_bin_malloc_hard() local
    [all …]

extent.c
    726   slab);  in extent_rtree_write_acquired()
    801   if (slab) {  in extent_register_impl()
    1007  slab, growing_retained);  in extent_split_interior()
    1134  assert(new_addr == NULL || !slab);  in extent_recycle()
    1135  assert(pad == 0 || !slab);  in extent_recycle()
    1136  assert(!*zero || !slab);  in extent_recycle()
    1178  if (slab) {  in extent_recycle()
    1179  extent_slab_set(extent, slab);  in extent_recycle()
    1301  assert(pad == 0 || !slab);  in extent_grow_retained()
    1302  assert(!*zero || !slab);  in extent_grow_retained()
    [all …]

jemalloc.c
    2125  alloc_ctx.slab = (usize  in imalloc_body()
    2136  alloc_ctx.slab = false;  in imalloc_body()
    2575  (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);  in ifree()
    2619  alloc_ctx.slab = true;  in isfree()
    2626  &dbg_ctx.slab);  in isfree()
    2628  assert(dbg_ctx.slab == alloc_ctx.slab);  in isfree()
    2633  (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);  in isfree()
    2810  &alloc_ctx.szind, &alloc_ctx.slab);  in free_fastpath()
    2813  if (!res || !alloc_ctx.slab) {  in free_fastpath()
    3244  (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);  in je_rallocx()
    [all …]

/freebsd/contrib/jemalloc/include/jemalloc/internal/

arena_inlines_b.h
    249  bool slab;  in arena_dalloc_no_tcache() local
    261  if (likely(slab)) {  in arena_dalloc_no_tcache()
    297  bool slab;  in arena_dalloc() local
    301  slab = alloc_ctx->slab;  in arena_dalloc()
    318  if (likely(slab)) {  in arena_dalloc()
    333  bool slab;  in arena_sdalloc_no_tcache() local
    362  if (likely(slab)) {  in arena_sdalloc_no_tcache()
    383  bool slab;  in arena_sdalloc() local
    393  &local_ctx.slab);  in arena_sdalloc()
    397  slab = alloc_ctx->slab;  in arena_sdalloc()
    [all …]

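Every deallocation path listed above branches on a per-pointer context that records whether the address belongs to a small-object slab or to a large extent. Below is a minimal standalone sketch of that dispatch; alloc_ctx_t, dalloc_small(), and dalloc_large() are hypothetical stand-ins for illustration, not jemalloc's actual types or helpers.

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t szind;	/* size-class index looked up alongside the flag */
	bool slab;	/* true: small region carved out of a slab extent */
} alloc_ctx_t;

/* Placeholder back ends for the two cases. */
void dalloc_small(void *ptr, size_t szind) { (void)ptr; (void)szind; }
void dalloc_large(void *ptr, size_t szind) { (void)ptr; (void)szind; }

void
sketch_dalloc(void *ptr, const alloc_ctx_t *ctx)
{
	if (ctx->slab) {
		/* Small object: hand the region back to its bin's slab. */
		dalloc_small(ptr, ctx->szind);
	} else {
		/* Large object: the whole extent is returned. */
		dalloc_large(ptr, ctx->szind);
	}
}
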
rtree.h
    281  rtree_leaf_elm_t *elm, bool slab) {  in rtree_leaf_elm_slab_write() argument
    287  (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);  in rtree_leaf_elm_slab_write()
    290  atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);  in rtree_leaf_elm_slab_write()
    300  ((uintptr_t)slab);  in rtree_leaf_elm_write()
    303  rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);  in rtree_leaf_elm_write()
    315  rtree_leaf_elm_t *elm, szind_t szind, bool slab) {  in rtree_leaf_elm_szind_slab_update() argument
    316  assert(!slab || szind < SC_NBINS);  in rtree_leaf_elm_szind_slab_update()
    322  rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);  in rtree_leaf_elm_szind_slab_update()
    387  extent_t *extent, szind_t szind, bool slab) {  in rtree_write() argument
    512  uintptr_t key, szind_t szind, bool slab) {  in rtree_szind_slab_update() argument
    [all …]

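The rtree_leaf_elm_slab_write() hit shows the slab flag being ORed into an address-sized word, which works because extent addresses are page aligned and their low bit is always zero. Here is a minimal sketch of that pointer-tagging trick, assuming only 2-byte alignment; pack_extent_slab() and friends are hypothetical names, and the real leaf element additionally packs the size-class index and uses release/acquire atomics.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

uintptr_t
pack_extent_slab(const void *extent, bool slab)
{
	/* Alignment guarantees bit 0 is free to carry the flag. */
	assert(((uintptr_t)extent & 1) == 0);
	return (uintptr_t)extent | (uintptr_t)slab;
}

void *
unpack_extent(uintptr_t bits)
{
	return (void *)(bits & ~(uintptr_t)1);
}

bool
unpack_slab(uintptr_t bits)
{
	return (bits & 1) != 0;
}

int
main(void)
{
	static _Alignas(64) unsigned char fake_extent[64];
	uintptr_t bits = pack_extent_slab(fake_extent, true);

	printf("extent=%p slab=%d\n", unpack_extent(bits), (int)unpack_slab(bits));
	return 0;
}
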
extent_inlines.h
    331  extent_slab_set(extent_t *extent, bool slab) {  in extent_slab_set() argument
    333  ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);  in extent_slab_set()
    368  bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,  in extent_init() argument
    370  assert(addr == PAGE_ADDR2BASE(addr) || !slab);  in extent_init()
    375  extent_slab_set(extent, slab);  in extent_init()

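extent_slab_set() stores the flag by shifting it into the extent's packed 64-bit metadata word (EXTENT_BITS_SLAB_SHIFT). The sketch below shows the same clear-then-OR bit-field update; the shift value and the extent_sketch_t type are made up for illustration and do not match jemalloc's real e_bits layout.

#include <stdbool.h>
#include <stdint.h>

#define SLAB_SHIFT 2			/* illustrative position only */
#define SLAB_MASK ((uint64_t)1 << SLAB_SHIFT)

typedef struct {
	uint64_t e_bits;		/* many small fields packed into one word */
} extent_sketch_t;

void
extent_sketch_slab_set(extent_sketch_t *extent, bool slab)
{
	/* Clear the field, then OR in the new value at its shift. */
	extent->e_bits = (extent->e_bits & ~SLAB_MASK) |
	    ((uint64_t)slab << SLAB_SHIFT);
}

bool
extent_sketch_slab_get(const extent_sketch_t *extent)
{
	return (extent->e_bits & SLAB_MASK) != 0;
}
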
extent_externs.h
    40  size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
    51  size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);

arena_structs_b.h
    229  bool slab;  member

arena_externs.h
    33  size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);

/freebsd/contrib/unbound/util/storage/

slabhash.c
    236  size_t slab, cnt = 0;  in count_slabhash_entries() local
    238  for(slab=0; slab<sh->size; slab++) {  in count_slabhash_entries()
    239  lock_quick_lock(&sh->array[slab]->lock);  in count_slabhash_entries()
    240  cnt += sh->array[slab]->num;  in count_slabhash_entries()
    241  lock_quick_unlock(&sh->array[slab]->lock);  in count_slabhash_entries()
    248  size_t slab, cnt = 0, max_collisions = 0;  in get_slabhash_stats() local
    250  for(slab=0; slab<sh->size; slab++) {  in get_slabhash_stats()
    251  lock_quick_lock(&sh->array[slab]->lock);  in get_slabhash_stats()
    252  cnt += sh->array[slab]->num;  in get_slabhash_stats()
    254  max_collisions = sh->array[slab]->max_collisions;  in get_slabhash_stats()
    [all …]

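count_slabhash_entries() and get_slabhash_stats() both walk the table one slab at a time, taking and releasing each slab's lock around the read. A toy version of that pattern follows, using plain pthread mutexes rather than unbound's lock_quick_* wrappers; toy_slab and toy_slabtable are hypothetical types.

#include <pthread.h>
#include <stddef.h>

struct toy_slab {
	pthread_mutex_t lock;		/* protects this slab only */
	size_t num;			/* entries currently stored here */
};

struct toy_slabtable {
	size_t size;			/* number of slabs */
	struct toy_slab *array;
};

size_t
toy_count_entries(struct toy_slabtable *t)
{
	size_t slab, cnt = 0;

	for (slab = 0; slab < t->size; slab++) {
		pthread_mutex_lock(&t->array[slab].lock);
		cnt += t->array[slab].num;
		pthread_mutex_unlock(&t->array[slab].lock);
	}
	return cnt;
}

Partitioning the table this way trades one contended lock for many smaller ones; because the slabs are never all held at once, the aggregate count is only a point-in-time snapshot.
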
/freebsd/sys/vm/

uma_int.h
    404  slab_tohashslab(uma_slab_t slab)  in slab_tohashslab() argument
    411  slab_data(uma_slab_t slab, uma_keg_t keg)  in slab_data() argument
    417  return (slab_tohashslab(slab)->uhs_data);  in slab_data()
    421  slab_item(uma_slab_t slab, uma_keg_t keg, int index)  in slab_item() argument
    425  data = (uintptr_t)slab_data(slab, keg);  in slab_item()
    434  data = (uintptr_t)slab_data(slab, keg);  in slab_item_index()
    606  uma_hash_slab_t slab;  in hash_sfind() local
    613  return (&slab->uhs_slab);  in hash_sfind()
    624  return (p->plinks.uma.slab);  in vtoslab()
    633  *slab = p->plinks.uma.slab;  in vtozoneslab()
    [all …]

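slab_item() and slab_item_index() are the two directions of the same arithmetic: items sit back to back in a slab's data area, so index and address convert with a multiply or divide by the padded item size. The stand-ins below (toy_keg, rsize) are illustrative and do not reproduce the kernel's uma_keg/uma_slab layout.

#include <stddef.h>
#include <stdint.h>

struct toy_keg {
	uintptr_t data;		/* start of the slab's item area */
	size_t rsize;		/* padded size of one item */
};

void *
toy_slab_item(const struct toy_keg *keg, int index)
{
	/* index -> address: base plus index times item size */
	return (void *)(keg->data + keg->rsize * (size_t)index);
}

int
toy_slab_item_index(const struct toy_keg *keg, const void *item)
{
	/* address -> index: offset from the base divided by item size */
	return (int)(((uintptr_t)item - keg->data) / keg->rsize);
}
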
uma_core.c
    1757  uma_slab_t slab;  in keg_alloc_slab() local
    1768  slab = NULL;  in keg_alloc_slab()
    1862  return (slab);  in keg_alloc_slab()
    3914  uma_slab_t slab;  in keg_first_slab() local
    3921  slab = NULL;  in keg_first_slab()
    3926  return (slab);  in keg_first_slab()
    3931  return (slab);  in keg_first_slab()
    3961  return (slab);  in keg_fetch_free_slab()
    4037  return (slab);  in keg_fetch_slab()
    4084  slab = NULL;  in zone_import()
    [all …]

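The keg_first_slab()/keg_fetch_free_slab() hits reflect the usual slab-allocator lookup order: prefer a partially filled slab so it gets filled up, and only then take a completely free one. A hedged sketch of that preference using <sys/queue.h> lists; the structure and field names are illustrative, not the kernel's uma_keg layout.

#include <stddef.h>
#include <sys/queue.h>

struct toy_slab {
	LIST_ENTRY(toy_slab) link;
	int free_items;			/* regions still available in this slab */
};

LIST_HEAD(toy_slablist, toy_slab);

struct toy_keg {
	struct toy_slablist part_slabs;	/* partially used slabs */
	struct toy_slablist free_slabs;	/* completely free slabs */
};

struct toy_slab *
toy_keg_first_slab(struct toy_keg *keg)
{
	struct toy_slab *slab;

	/* Prefer filling a partially used slab before touching a free one. */
	slab = LIST_FIRST(&keg->part_slabs);
	if (slab != NULL)
		return slab;
	return LIST_FIRST(&keg->free_slabs);
}
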
vm_page.h
    232  void *slab;  member

/freebsd/contrib/unbound/validator/

val_kcache.c
    62   kcache->slab = slabhash_create(numtables, start_size, maxmem,  in key_cache_create()
    65   if(!kcache->slab) {  in key_cache_create()
    78   slabhash_delete(kcache->slab);  in key_cache_delete()
    90   slabhash_insert(kcache->slab, k->entry.hash, &k->entry,  in key_cache_insert()
    116  e = slabhash_lookup(kcache->slab, lookfor.entry.hash, &lookfor, wr);  in key_cache_search()
    154  return sizeof(*kcache) + slabhash_get_mem(kcache->slab);  in key_cache_get_mem()
    166  slabhash_remove(kcache->slab, lookfor.entry.hash, &lookfor);  in key_cache_remove()

val_kcache.h
    56  struct slabhash* slab;  member

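val_kcache is a thin wrapper: the key cache owns exactly one slabhash (the `slab` member above), forwards insert/lookup/remove to it by entry hash, and reports its memory use as its own size plus the table's. The self-contained toy below mirrors only the ownership and accounting; toy_slabhash and the toy_* functions are hypothetical stand-ins, and unbound's real slabhash calls take more arguments.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical stand-in for the slab-partitioned hash table. */
struct toy_slabhash {
	size_t mem;			/* bytes accounted to the table */
};

struct toy_slabhash *
toy_slabhash_create(size_t maxmem)
{
	struct toy_slabhash *sh = calloc(1, sizeof(*sh));

	if (sh != NULL)
		sh->mem = maxmem;
	return sh;
}

size_t toy_slabhash_get_mem(const struct toy_slabhash *sh) { return sh->mem; }
void toy_slabhash_delete(struct toy_slabhash *sh) { free(sh); }

/* The key cache owns one table and delegates to it. */
struct toy_key_cache {
	struct toy_slabhash *slab;
};

struct toy_key_cache *
toy_key_cache_create(size_t maxmem)
{
	struct toy_key_cache *kc = calloc(1, sizeof(*kc));

	if (kc == NULL)
		return NULL;
	kc->slab = toy_slabhash_create(maxmem);
	if (kc->slab == NULL) {
		free(kc);
		return NULL;
	}
	return kc;
}

size_t
toy_key_cache_get_mem(const struct toy_key_cache *kc)
{
	/* Account for the wrapper itself plus the backing table. */
	return sizeof(*kc) + toy_slabhash_get_mem(kc->slab);
}

void
toy_key_cache_delete(struct toy_key_cache *kc)
{
	if (kc == NULL)
		return;
	toy_slabhash_delete(kc->slab);
	free(kc);
}
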
/freebsd/sys/kern/

kern_malloc.c
    570   va = (uintptr_t)slab;  in malloc_large_slab()
    579   va = (uintptr_t)slab;  in malloc_large_size()
    904   uma_slab_t slab;  in free() local
    916   if (slab == NULL)  in free()
    944   uma_slab_t slab;  in zfree() local
    956   if (slab == NULL)  in zfree()
    985   uma_slab_t slab;  in realloc() local
    1015  KASSERT(slab != NULL,  in realloc()
    1019  if (!malloc_large_slab(slab))  in realloc()
    1084  uma_slab_t slab;  in malloc_usable_size() local
    [all …]

/freebsd/contrib/bc/src/

vector.c
    541  bc_slab_free(void* slab)  in bc_slab_free() argument
    543  free(((BcSlab*) slab)->s);  in bc_slab_free()
    549  BcSlab* slab;  in bc_slabvec_init() local
    556  slab = bc_vec_pushEmpty(v);  in bc_slabvec_init()
    557  bc_slab_init(slab);  in bc_slabvec_init()
    565  BcSlab slab;  in bc_slabvec_strdup() local
    580  slab.len = SIZE_MAX;  in bc_slabvec_strdup()
    581  slab.s = bc_vm_strdup(str);  in bc_slabvec_strdup()
    584  bc_vec_pushAt(v, &slab, v->len - 1);  in bc_slabvec_strdup()
    586  return slab.s;  in bc_slabvec_strdup()

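bc_slabvec_strdup() points at a slab-based string pool: short strings are carved out of the current fixed-size slab, while a string that does not fit gets its own dedicated heap copy (the len = SIZE_MAX case above). The sketch below captures only that fit-or-fall-back decision with a single slab; TOY_SLAB_SIZE and toy_slab_strdup() are invented names, and bc's real BcSlab/BcVec bookkeeping differs.

#include <stdlib.h>
#include <string.h>

#define TOY_SLAB_SIZE 128

struct toy_slab {
	char buf[TOY_SLAB_SIZE];	/* bump-allocated storage */
	size_t len;			/* bytes already handed out */
};

/*
 * Copy str into the slab if it fits, advancing the bump offset; otherwise
 * fall back to a dedicated heap copy, which the caller must free.
 */
char *
toy_slab_strdup(struct toy_slab *slab, const char *str)
{
	size_t need = strlen(str) + 1;

	if (need <= TOY_SLAB_SIZE - slab->len) {
		char *dst = slab->buf + slab->len;

		memcpy(dst, str, need);
		slab->len += need;
		return dst;
	}

	char *copy = malloc(need);

	if (copy != NULL)
		memcpy(copy, str, need);
	return copy;
}
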
/freebsd/contrib/netbsd-tests/sys/uvm/

t_uvm_physseg.c
    495  uvm_page_init_fake(slab, npages1 + npages2 + npages3);  in ATF_TC_BODY()
    514  ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));  in ATF_TC_BODY()
    520  ATF_REQUIRE(pgs < slab || pgs > (slab + npages1  in ATF_TC_BODY()
    577  uvm_page_init_fake(slab, npages1 + npages2 + npages3);  in ATF_TC_BODY()
    698  uvm_page_init_fake(slab, npages1 + npages2);  in ATF_TC_BODY()
    775  struct vm_page *slab, *pgs;  in ATF_TC_BODY() local
    781  slab = malloc(sizeof(struct vm_page) * npages * 2);  in ATF_TC_BODY()
    830  struct vm_page *slab, *pgs;  in ATF_TC_BODY() local
    836  slab = malloc(sizeof(struct vm_page) * npages * 2);  in ATF_TC_BODY()
    868  struct vm_page *slab, *pgs;  in ATF_TC_BODY() local
    [all …]

t_uvm_physseg_load.c
    543  struct vm_page *slab = malloc(sizeof(struct vm_page) *  in ATF_TC_BODY() local
    553  uvm_page_init_fake(slab, npages1 + npages2);  in ATF_TC_BODY()
    592  struct vm_page *slab = malloc(sizeof(struct vm_page) *  in ATF_TC_BODY() local
    602  uvm_page_init_fake(slab, npages1 + npages2);  in ATF_TC_BODY()
    641  struct vm_page *slab = malloc(sizeof(struct vm_page)  in ATF_TC_BODY() local
    651  uvm_page_init_fake(slab, npages1 + npages2);  in ATF_TC_BODY()
    690  struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));  in ATF_TC_BODY() local
    699  uvm_page_init_fake(slab, npages1 + npages2);  in ATF_TC_BODY()

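Both test files use the same fixture pattern: malloc a plain array of struct vm_page as a fake backing "slab" and hand it to uvm_page_init_fake() so the physseg code has page metadata to manipulate outside the kernel. A hedged userland sketch of that setup; toy_page and toy_page_init_fake() are stand-ins, not the real UVM structures.

#include <stdlib.h>
#include <string.h>

struct toy_page {
	unsigned long phys_addr;
	int flags;
};

/* Stand-in for uvm_page_init_fake(): give the pages a known clean state. */
void
toy_page_init_fake(struct toy_page *pages, size_t npages)
{
	memset(pages, 0, npages * sizeof(*pages));
}

int
toy_fixture_setup(size_t npages1, size_t npages2, struct toy_page **out)
{
	struct toy_page *slab;

	/* Allocate the fake page array the code under test will use. */
	slab = malloc(sizeof(*slab) * (npages1 + npages2));
	if (slab == NULL)
		return -1;
	toy_page_init_fake(slab, npages1 + npages2);
	*out = slab;
	return 0;
}
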
/freebsd/contrib/unbound/daemon/

cachedump.c
    126  size_t slab;  in dump_rrset_cache() local
    128  for(slab=0; slab<r->table.size; slab++) {  in dump_rrset_cache()
    129  lock_quick_lock(&r->table.array[slab]->lock);  in dump_rrset_cache()
    130  if(!dump_rrset_lruhash(ssl, r->table.array[slab],  in dump_rrset_cache()
    132  lock_quick_unlock(&r->table.array[slab]->lock);  in dump_rrset_cache()
    135  lock_quick_unlock(&r->table.array[slab]->lock);  in dump_rrset_cache()
    283  size_t slab;  in dump_msg_cache() local
    285  for(slab=0; slab<sh->size; slab++) {  in dump_msg_cache()
    286  lock_quick_lock(&sh->array[slab]->lock);  in dump_msg_cache()
    288  lock_quick_unlock(&sh->array[slab]->lock);  in dump_msg_cache()
    [all …]

stats.c
    302  s->svr.key_cache_count = (long long)count_slabhash_entries(worker->env.key_cache->slab);  in server_stats_compile()

/freebsd/tools/test/stress2/misc/

uma_zalloc_arg.sh
    145  @@ -292,4 +294,143 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
    146  BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
    293  @@ -427,6 +427,9 @@ vsetslab(vm_offset_t va, uma_slab_t slab)

/freebsd/sys/contrib/openzfs/config/

kernel-kmem-cache.m4
    8  #include <linux/slab.h>

kernel-kmem.m4
    67  #include <linux/slab.h>

/freebsd/contrib/bc/include/

vector.h
    409  bc_slab_free(void* slab);