
Searched refs:pool (Results 1 – 25 of 799) sorted by relevance


/linux/net/core/
page_pool.c
192 memcpy(&pool->p, &params->fast, sizeof(pool->p)); in page_pool_init()
362 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
378 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
393 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
519 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
540 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
549 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
679 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
936 pool->disconnect(pool); in __page_pool_destroy()
955 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
[all …]
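The page_pool.c hits above are the allocator's cache fill and drain paths. For context, here is a minimal sketch of how a network driver typically drives this API, assuming the public constructors from <net/page_pool/helpers.h> and <net/page_pool/types.h>; struct my_rx_ring and its functions are hypothetical names:

```c
#include <linux/err.h>
#include <net/page_pool/helpers.h>

struct my_rx_ring {                    /* hypothetical driver ring */
	struct page_pool *pool;
	struct device *dev;
};

static int my_rx_ring_init(struct my_rx_ring *ring, unsigned int ring_size)
{
	struct page_pool_params pp = {
		.pool_size = ring_size,
		.nid       = NUMA_NO_NODE,
		.dev       = ring->dev,          /* used if DMA mapping is enabled */
		.dma_dir   = DMA_FROM_DEVICE,
	};

	/* page_pool_init() copies these params into pool->p (hit above) */
	ring->pool = page_pool_create(&pp);
	if (IS_ERR(ring->pool))
		return PTR_ERR(ring->pool);
	return 0;
}

static struct page *my_rx_refill(struct my_rx_ring *ring)
{
	/* Tries pool->alloc.cache first, then the ptr_ring, then the
	 * page allocator slow path shown in the hits above. */
	return page_pool_dev_alloc_pages(ring->pool);
}

static void my_rx_complete(struct my_rx_ring *ring, struct page *page)
{
	/* From NAPI context this lands in page_pool_recycle_in_cache() */
	page_pool_recycle_direct(ring->pool, page);
}
```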
page_pool_user.c
38 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
44 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
56 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
84 struct page_pool *pool; in netdev_nl_page_pool_get_dump() local
229 if (pool->user.napi_id && in page_pool_nl_fill()
307 err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, in page_pool_list()
313 if (pool->slow.netdev) { in page_pool_list()
316 pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0; in page_pool_list()
349 struct page_pool *pool; in page_pool_unreg_netdev_wipe() local
370 pool->slow.netdev = lo; in page_pool_unreg_netdev()
[all …]
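The xa_alloc_cyclic() call in page_pool_list() is the generic XArray cyclic-ID pattern. A small sketch with the documented <linux/xarray.h> API; my_pools, my_next_id, and my_pool_register() are made-up names:

```c
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_pools);	/* ID-allocating xarray */
static u32 my_next_id;

static int my_pool_register(void *pool, u32 *id)
{
	/* Stores the pool at the next free 32-bit index, wrapping
	 * around once the limit is reached, as page_pool_list() does
	 * with &page_pools and pool->user.id above. */
	return xa_alloc_cyclic(&my_pools, id, pool, xa_limit_32b,
			       &my_next_id, GFP_KERNEL);
}
```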
/linux/net/xdp/
xsk_buff_pool.c
37 if (!pool) in xp_destroy()
42 kvfree(pool); in xp_destroy()
65 if (!pool) in xp_create_and_assign_umem()
102 xskb->pool = pool; in xp_create_and_assign_umem()
109 xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); in xp_create_and_assign_umem()
215 bpf.xsk.pool = pool; in xp_assign_dev()
249 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
265 xsk_clear_pool_at_qid(pool->netdev, pool->queue_id); in xp_clear_dev()
300 if (!pool) in xp_put_pool()
487 *addr + pool->chunk_size > pool->addrs_cnt || in xp_check_unaligned()
[all …]
/linux/mm/
mempool.c
139 BUG_ON(pool->curr_nr >= pool->min_nr); in add_element()
142 pool->elements[pool->curr_nr++] = element; in add_element()
147 void *element = pool->elements[--pool->curr_nr]; in remove_element()
170 pool->free(element, pool->pool_data); in mempool_exit()
214 while (pool->curr_nr < pool->min_nr) { in mempool_init_node()
217 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
322 pool->free(element, pool->pool_data); in mempool_resize()
349 while (pool->curr_nr < pool->min_nr) { in mempool_resize()
355 if (pool->curr_nr < pool->min_nr) { in mempool_resize()
540 if (likely(pool->curr_nr < pool->min_nr)) { in mempool_free()
[all …]
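These are mempool's reserve-management internals. Typical client usage, as a sketch against the documented <linux/mempool.h> API; struct my_obj and the slab cache are illustrative:

```c
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_obj { int data; };           /* hypothetical element type */

static struct kmem_cache *my_cache;
static mempool_t *my_pool;

static int my_pool_setup(void)
{
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
				     0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Pre-allocates min_nr elements via add_element() (hit above) */
	my_pool = mempool_create_slab_pool(16, my_cache);
	if (!my_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct my_obj *my_obj_get(void)
{
	/* Falls back to remove_element() from the reserve when the
	 * slab allocation fails; may sleep with GFP_KERNEL. */
	return mempool_alloc(my_pool, GFP_KERNEL);
}

static void my_obj_put(struct my_obj *obj)
{
	/* Refills the reserve first (curr_nr < min_nr), else frees */
	mempool_free(obj, my_pool);
}
```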
dmapool.c
83 pool->name, pool->nr_active, in pools_show()
84 pool->nr_blocks, pool->size, in pools_show()
187 pool->nr_active++; in pool_block_pop()
306 while (offset + pool->size <= pool->allocation) { in pool_initialise_page()
331 pool->nr_pages++; in pool_initialise_page()
342 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
378 dev_err(pool->dev, "%s %s busy\n", __func__, pool->name); in dma_pool_destroy()
384 dma_free_coherent(pool->dev, pool->allocation, in dma_pool_destroy()
390 kfree(pool); in dma_pool_destroy()
503 if (pool) in dmam_pool_create()
[all …]
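dmapool layers the same reserve idea over coherent DMA memory. A hedged sketch of the documented <linux/dmapool.h> API; the device pointer, sizes, and names are illustrative:

```c
#include <linux/dmapool.h>

static struct dma_pool *desc_pool;

static int my_desc_pool_init(struct device *my_dev)
{
	/* name, device, block size, alignment, boundary (0 = none) */
	desc_pool = dma_pool_create("my-descs", my_dev, 64, 64, 0);
	if (!desc_pool)
		return -ENOMEM;
	return 0;
}

static void *my_desc_alloc(dma_addr_t *dma)
{
	/* Backed by dma_alloc_coherent() pages, carved into blocks by
	 * pool_initialise_page() as in the hits above. */
	return dma_pool_alloc(desc_pool, GFP_KERNEL, dma);
}

static void my_desc_free(void *vaddr, dma_addr_t dma)
{
	dma_pool_free(desc_pool, vaddr, dma);
}
```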
zbud.c
202 struct zbud_pool *pool; in zbud_create_pool() local
206 if (!pool) in zbud_create_pool()
212 pool->pages_nr = 0; in zbud_create_pool()
213 return pool; in zbud_create_pool()
224 kfree(pool); in zbud_destroy_pool()
259 spin_lock(&pool->lock); in zbud_alloc()
280 spin_lock(&pool->lock); in zbud_alloc()
281 pool->pages_nr++; in zbud_alloc()
316 spin_lock(&pool->lock); in zbud_free()
331 pool->pages_nr--; in zbud_free()
[all …]
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
30 if (!pool) in k3_cppi_desc_pool_destroy()
38 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
45 kfree(pool); in k3_cppi_desc_pool_destroy()
58 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in k3_cppi_desc_pool_create_name()
59 if (!pool) in k3_cppi_desc_pool_create_name()
65 pool->mem_size = pool->num_desc * pool->desc_size; in k3_cppi_desc_pool_create_name()
80 pool->desc_infos = kcalloc(pool->num_desc, in k3_cppi_desc_pool_create_name()
85 pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size, in k3_cppi_desc_pool_create_name()
92 (phys_addr_t)pool->dma_addr, pool->mem_size, in k3_cppi_desc_pool_create_name()
99 return pool; in k3_cppi_desc_pool_create_name()
[all …]
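The TI K3 CPPI descriptor pool wraps one dma_alloc_coherent() region plus a gen_pool. A rough usage sketch, with the signatures assumed from the driver-local k3-cppi-desc-pool.h (unverified, treat as illustrative):

```c
#include "k3-cppi-desc-pool.h"	/* driver-local header */

static struct k3_cppi_desc_pool *my_desc_pool_setup(struct device *dev)
{
	/* 512 descriptors of 128 bytes each; mem_size = num_desc *
	 * desc_size, as computed in k3_cppi_desc_pool_create_name() */
	return k3_cppi_desc_pool_create_name(dev, 512, 128, "my-tx-pool");
}

static void my_desc_cycle(struct k3_cppi_desc_pool *pool)
{
	void *desc = k3_cppi_desc_pool_alloc(pool);

	if (desc)
		k3_cppi_desc_pool_free(pool, desc);
}
```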
/linux/drivers/md/
dm-thin.c
621 struct pool *pool = tc->pool; in requeue_deferred_cells() local
674 struct pool *pool = tc->pool; in get_bio_block() local
691 struct pool *pool = tc->pool; in get_bio_block_range() local
716 struct pool *pool = tc->pool; in remap() local
754 struct pool *pool = tc->pool; in issue() local
882 struct pool *pool = tc->pool; in cell_defer_no_holder() local
962 struct pool *pool = tc->pool; in complete_overwrite_bio() local
995 struct pool *pool = tc->pool; in process_prepared_mapping() local
1087 struct pool *pool = tc->pool; in passdown_double_checking_shared_status() local
1151 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1() local
[all …]
/linux/net/ceph/
msgpool.c
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
34 msg->pool = NULL; in msgpool_free()
43 pool->type = type; in ceph_msgpool_init()
46 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); in ceph_msgpool_init()
47 if (!pool->pool) in ceph_msgpool_init()
49 pool->name = name; in ceph_msgpool_init()
56 mempool_destroy(pool->pool); in ceph_msgpool_destroy()
68 pool->front_len, pool->max_data_items); in ceph_msgpool_get()
76 msg = mempool_alloc(pool->pool, GFP_NOFS); in ceph_msgpool_get()
[all …]
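ceph_msgpool is a thin wrapper over mempool_create() with custom constructor and destructor callbacks. The same pattern in generic form, assuming only the documented <linux/mempool.h> callback signatures; the element sizes and names are made up:

```c
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *my_elem_alloc(gfp_t gfp, void *pool_data)
{
	/* pool_data is the last argument passed to mempool_create() */
	size_t size = (size_t)(uintptr_t)pool_data;

	return kmalloc(size, gfp);
}

static void my_elem_free(void *element, void *pool_data)
{
	kfree(element);
}

static mempool_t *my_msgpool_create(int min_nr, size_t elem_size)
{
	/* Same shape as pool->pool = mempool_create(size, msgpool_alloc,
	 * msgpool_free, pool) in the hits above. */
	return mempool_create(min_nr, my_elem_alloc, my_elem_free,
			      (void *)(uintptr_t)elem_size);
}
```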
/linux/lib/
objpool.c
40 pool->nr_objs++; in objpool_init_percpu_slot()
103 if (!pool->cpu_slots) in objpool_fini_percpu_slots()
140 pool->cpu_slots = kzalloc(slot_size, pool->gfp); in objpool_init()
141 if (!pool->cpu_slots) in objpool_init()
149 refcount_set(&pool->ref, pool->nr_objs + 1); in objpool_init()
158 if (!pool->cpu_slots) in objpool_free()
165 if (pool->release) in objpool_free()
166 pool->release(pool, pool->context); in objpool_free()
173 if (!obj || !pool) in objpool_drop()
177 objpool_free(pool); in objpool_drop()
[all …]
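lib/objpool.c is a lockless per-CPU object pool. A hedged sketch of client usage, with the objpool_init()/objpool_pop()/objpool_push() signatures assumed from <linux/objpool.h>; struct my_node is hypothetical:

```c
#include <linux/objpool.h>

struct my_node { int val; };

static struct objpool_head my_objpool;

static int my_objpool_setup(void)
{
	/* Spreads 128 objects across per-CPU slots (counted by
	 * objpool_init_percpu_slot() above); no objinit callback,
	 * no context, no release callback in this sketch. */
	return objpool_init(&my_objpool, 128, sizeof(struct my_node),
			    GFP_KERNEL, NULL, NULL, NULL);
}

static void my_objpool_use(void)
{
	struct my_node *n = objpool_pop(&my_objpool);

	if (n)
		objpool_push(n, &my_objpool);
}
```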
/linux/sound/core/seq/
seq_memory.c
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
239 pool = cell->pool; in snd_seq_cell_free()
289 while (pool->free == NULL && ! nonblock && ! pool->closing) { in snd_seq_cell_alloc()
461 cellptr->pool = pool; in snd_seq_pool_init()
465 pool->room = (pool->size + 1) / 2; in snd_seq_pool_init()
469 pool->total_elements = pool->size; in snd_seq_pool_init()
520 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in snd_seq_pool_new()
521 if (!pool) in snd_seq_pool_new()
535 return pool; in snd_seq_pool_new()
[all …]
/linux/include/net/
xdp_sock_drv.h
42 return pool->chunk_size; in xsk_pool_get_chunk_size()
47 return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool); in xsk_pool_get_rx_frame_size()
53 xp_set_rxq_info(pool, rxq); in xsk_pool_set_rxq_info()
59 xp_fill_cb(pool, desc); in xsk_pool_fill_cb()
74 xp_dma_unmap(pool, attrs); in xsk_pool_dma_unmap()
80 struct xdp_umem *umem = pool->umem; in xsk_pool_dma_map()
101 return xp_alloc(pool); in xsk_buff_alloc()
117 return xp_can_alloc(pool, count); in xsk_buff_can_alloc()
190 return xp_raw_get_dma(pool, addr); in xsk_buff_raw_get_dma()
212 if (!pool->tx_metadata_len) in xsk_buff_get_metadata()
[all …]
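These helpers are the driver-facing side of AF_XDP zero-copy. A sketch of an RX fill loop built on them; my_post_rx() is a hypothetical ring-post helper, everything else is from <net/xdp_sock_drv.h>:

```c
#include <net/xdp_sock_drv.h>

/* hypothetical hardware ring-post helper */
extern void my_post_rx(dma_addr_t dma, u32 len, struct xdp_buff *xdp);

static int my_zc_rx_fill(struct xsk_buff_pool *pool, u32 budget)
{
	u32 i;

	if (!xsk_buff_can_alloc(pool, budget))	/* xp_can_alloc() hit */
		return -ENOMEM;

	for (i = 0; i < budget; i++) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool); /* xp_alloc() */

		if (!xdp)
			break;
		/* HW frame length = chunk_size - headroom, per
		 * xsk_pool_get_rx_frame_size() above */
		my_post_rx(xsk_buff_xdp_get_dma(xdp),
			   xsk_pool_get_rx_frame_size(pool), xdp);
	}
	return i;
}
```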
xsk_buff_pool.h
30 struct xsk_buff_pool *pool; member
124 xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; in xp_init_xskb_addr()
159 xskb->pool->frame_len, in xp_dma_sync_for_cpu()
184 return pool->dma_pages && in xp_desc_crosses_non_contig_pg()
195 return addr & pool->chunk_mask; in xp_aligned_extract_addr()
216 return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift; in xp_aligned_extract_idx()
221 if (xskb->pool->unaligned) in xp_release()
222 xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; in xp_release()
229 offset += xskb->pool->headroom; in xp_get_handle()
230 if (!xskb->pool->unaligned) in xp_get_handle()
[all …]
/linux/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
rmgr_vbuf.c
134 assert(pool); in ia_css_rmgr_init_vbuf()
135 if (!pool) in ia_css_rmgr_init_vbuf()
138 if (pool->recycle && pool->size) { in ia_css_rmgr_init_vbuf()
142 pool->size; in ia_css_rmgr_init_vbuf()
144 if (pool->handles) in ia_css_rmgr_init_vbuf()
150 pool->size = 0; in ia_css_rmgr_init_vbuf()
151 pool->handles = NULL; in ia_css_rmgr_init_vbuf()
166 if (!pool) { in ia_css_rmgr_uninit_vbuf()
170 if (pool->handles) { in ia_css_rmgr_uninit_vbuf()
203 assert(pool); in rmgr_push_handle()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
280 buddy->pool = pool; in dr_icm_buddy_create()
291 pool->dmn->num_buddies[pool->icm_type]++; in dr_icm_buddy_create()
343 return pool->hot_memory_size > pool->th; in dr_icm_pool_is_sync_required()
470 struct mlx5dr_icm_pool *pool = buddy->pool; in mlx5dr_icm_free_chunk() local
481 hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++]; in mlx5dr_icm_free_chunk()
512 pool = kvzalloc(sizeof(*pool), GFP_KERNEL); in mlx5dr_icm_pool_create()
513 if (!pool) in mlx5dr_icm_pool_create()
516 pool->dmn = dmn; in mlx5dr_icm_pool_create()
557 return pool; in mlx5dr_icm_pool_create()
560 kvfree(pool); in mlx5dr_icm_pool_create()
[all …]
dr_arg.c
62 pool->dmn->pdn, in dr_arg_pool_alloc_objs()
102 mutex_lock(&pool->mutex); in dr_arg_pool_get_arg_obj()
125 mutex_lock(&pool->mutex); in dr_arg_pool_put_arg_obj()
133 struct dr_arg_pool *pool; in dr_arg_pool_create() local
135 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in dr_arg_pool_create()
136 if (!pool) in dr_arg_pool_create()
139 pool->dmn = dmn; in dr_arg_pool_create()
142 mutex_init(&pool->mutex); in dr_arg_pool_create()
148 return pool; in dr_arg_pool_create()
151 kfree(pool); in dr_arg_pool_create()
[all …]
/linux/drivers/gpu/drm/panthor/
panthor_heap.c
323 if (!pool->vm) { in panthor_heap_create()
491 kfree(pool); in panthor_heap_pool_release()
500 if (pool) in panthor_heap_pool_put()
513 if (pool) in panthor_heap_pool_get()
516 return pool; in panthor_heap_pool_get()
537 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in panthor_heap_pool_create()
538 if (!pool) in panthor_heap_pool_create()
544 pool->vm = vm; in panthor_heap_pool_create()
563 return pool; in panthor_heap_pool_create()
590 if (!pool) in panthor_heap_pool_destroy()
[all …]
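panthor_heap_pool_get()/put() follow the standard NULL-tolerant kref lifetime pattern. Here it is in generic form with the stock <linux/kref.h> API; struct my_heap_pool is hypothetical:

```c
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_heap_pool {
	struct kref refcount;
};

static void my_heap_pool_release(struct kref *kref)
{
	struct my_heap_pool *pool =
		container_of(kref, struct my_heap_pool, refcount);

	kfree(pool);	/* mirrors panthor_heap_pool_release() above */
}

static struct my_heap_pool *my_heap_pool_get(struct my_heap_pool *pool)
{
	if (pool)	/* get/put tolerate NULL, as in the hits */
		kref_get(&pool->refcount);
	return pool;
}

static void my_heap_pool_put(struct my_heap_pool *pool)
{
	if (pool)
		kref_put(&pool->refcount, my_heap_pool_release);
}
```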
/linux/drivers/gpu/drm/amd/display/dc/resource/dce80/
dce80_resource.c
933 *pool = NULL; in dce80_destroy_resource_pool()
1044 if (!pool->base.irqs) in dce80_construct()
1138 if (!pool) in dce80_create_resource_pool()
1142 return &pool->base; in dce80_create_resource_pool()
1144 kfree(pool); in dce80_create_resource_pool()
1244 if (!pool->base.irqs) in dce81_construct()
1338 if (!pool) in dce81_create_resource_pool()
1342 return &pool->base; in dce81_create_resource_pool()
1344 kfree(pool); in dce81_create_resource_pool()
1535 if (!pool) in dce83_create_resource_pool()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
irq_affinity.c
10 pool->irqs_per_cpu[cpu]--; in cpu_put()
15 pool->irqs_per_cpu[cpu]++; in cpu_get()
27 if (!pool->irqs_per_cpu[cpu]) { in cpu_get_least_loaded()
33 if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) in cpu_get_least_loaded()
42 pool->irqs_per_cpu[best_cpu]++; in cpu_get_least_loaded()
54 err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL); in irq_pool_request_irq()
57 if (pool->irqs_per_cpu) { in irq_pool_request_irq()
125 mutex_lock(&pool->lock); in mlx5_irq_affinity_request()
155 mutex_unlock(&pool->lock); in mlx5_irq_affinity_request()
168 if (pool->irqs_per_cpu) in mlx5_irq_affinity_irq_release()
[all …]
/linux/drivers/gpu/drm/amd/display/dc/dce60/
dce60_resource.c
927 *pool = NULL; in dce60_destroy_resource_pool()
1031 if (!pool->base.irqs) in dce60_construct()
1125 if (!pool) in dce60_create_resource_pool()
1129 return &pool->base; in dce60_create_resource_pool()
1131 kfree(pool); in dce60_create_resource_pool()
1323 if (!pool) in dce61_create_resource_pool()
1327 return &pool->base; in dce61_create_resource_pool()
1329 kfree(pool); in dce61_create_resource_pool()
1517 if (!pool) in dce64_create_resource_pool()
1521 return &pool->base; in dce64_create_resource_pool()
[all …]
/linux/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
45 if (addr < pool->range_start || addr >= pool->range_end) in __find_buddy_nocheck()
103 if (phys < pool->range_start || phys >= pool->range_end) in __hyp_attach_page()
156 __hyp_attach_page(pool, p); in __hyp_put_page()
170 hyp_spin_lock(&pool->lock); in hyp_put_page()
171 __hyp_put_page(pool, p); in hyp_put_page()
172 hyp_spin_unlock(&pool->lock); in hyp_put_page()
179 hyp_spin_lock(&pool->lock); in hyp_get_page()
203 hyp_spin_lock(&pool->lock); in hyp_alloc_pages()
206 while (i <= pool->max_order && list_empty(&pool->free_area[i])) in hyp_alloc_pages()
208 if (i > pool->max_order) { in hyp_alloc_pages()
[all …]
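This is the EL2 (nVHE) buddy allocator; it runs only inside the hypervisor, so the following is purely an illustrative sketch, with signatures assumed from arch/arm64/kvm/hyp/include/nvhe/gfp.h:

```c
#include <nvhe/gfp.h>

static struct hyp_pool my_pool;	/* hypothetical EL2 pool */

static int my_hyp_setup(u64 pfn, unsigned int nr_pages)
{
	/* Registers [pfn, pfn + nr_pages) as range_start/range_end,
	 * the bounds checked by __find_buddy_nocheck() above. */
	return hyp_pool_init(&my_pool, pfn, nr_pages, 0);
}

static void *my_hyp_alloc(void)
{
	/* Takes pool->lock and scans free_area[] up to max_order,
	 * as in hyp_alloc_pages() above. */
	return hyp_alloc_pages(&my_pool, 0);
}

static void my_hyp_free(void *va)
{
	/* Drops the refcount; at zero the page is re-attached and
	 * coalesced by __hyp_attach_page(). */
	hyp_put_page(&my_pool, va);
}
```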
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool) argument
412 pool->avail_deks--; in mlx5_crypto_dek_pool_pop()
413 pool->in_use_deks++; in mlx5_crypto_dek_pool_pop()
460 if (pool->syncing) in mlx5_crypto_dek_pool_push()
595 err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose)); in mlx5_crypto_dek_sync_work_fn()
676 mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list, in mlx5_crypto_dek_destroy_work_fn()
686 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in mlx5_crypto_dek_pool_create()
687 if (!pool) in mlx5_crypto_dek_pool_create()
690 pool->mdev = mdev; in mlx5_crypto_dek_pool_create()
704 return pool; in mlx5_crypto_dek_pool_create()
[all …]
/linux/net/rds/
ib_rdma.c
275 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
423 &pool->clean_list); in rds_ib_flush_mr_pool()
450 if (atomic_inc_return(&pool->item_count) <= pool->max_items) in rds_ib_try_reuse_ibmr()
487 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_mr() local
509 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || in rds_ib_free_mr()
510 atomic_read(&pool->dirty_count) >= pool->max_items / 5) in rds_ib_free_mr()
640 kfree(pool); in rds_ib_destroy_mr_pool()
648 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in rds_ib_create_mr_pool()
649 if (!pool) in rds_ib_create_mr_pool()
671 pool->max_free_pinned = pool->max_items * pool->max_pages / 4; in rds_ib_create_mr_pool()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
127 pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count), in mlxsw_sp_counter_pool_init()
129 if (!pool) in mlxsw_sp_counter_pool_init()
134 flex_array_size(pool, sub_pools, pool->sub_pools_count)); in mlxsw_sp_counter_pool_init()
139 &pool->pool_size); in mlxsw_sp_counter_pool_init()
145 pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL); in mlxsw_sp_counter_pool_init()
146 if (!pool->usage) { in mlxsw_sp_counter_pool_init()
158 bitmap_free(pool->usage); in mlxsw_sp_counter_pool_init()
163 kfree(pool); in mlxsw_sp_counter_pool_init()
173 WARN_ON(find_first_bit(pool->usage, pool->pool_size) != in mlxsw_sp_counter_pool_fini()
176 bitmap_free(pool->usage); in mlxsw_sp_counter_pool_fini()
[all …]
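The idiom worth noting in spectrum_cnt.c is sizing a flexible-array struct with struct_size() and copying with flex_array_size(). A generic sketch with the <linux/overflow.h> helpers; struct my_pool here is made up:

```c
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_sub_pool { u32 base; u32 size; };

struct my_pool {
	unsigned int sub_pools_count;
	struct my_sub_pool sub_pools[];	/* flexible array member */
};

static struct my_pool *my_pool_alloc(const struct my_sub_pool *tmpl,
				     unsigned int count)
{
	/* struct_size() = sizeof(*pool) + count * sizeof(sub_pools[0]),
	 * with overflow checking */
	struct my_pool *pool = kzalloc(struct_size(pool, sub_pools, count),
				       GFP_KERNEL);

	if (!pool)
		return NULL;
	pool->sub_pools_count = count;
	/* flex_array_size() checks the same multiplication on copy */
	memcpy(pool->sub_pools, tmpl,
	       flex_array_size(pool, sub_pools, count));
	return pool;
}
```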
/linux/include/net/page_pool/
helpers.h
94 return page_pool_alloc_pages(pool, gfp); in page_pool_dev_alloc_pages()
127 return page_pool_alloc_pages(pool, gfp); in page_pool_alloc()
138 if (pool->frag_offset + *size > max_size) { in page_pool_alloc()
140 pool->frag_offset = max_size; in page_pool_alloc()
199 return page_pool_alloc_va(pool, size, gfp); in page_pool_dev_alloc_va()
212 return pool->p.dma_dir; in page_pool_get_dma_dir()
348 page_pool_put_full_page(pool, page, true); in page_pool_recycle_direct()
416 dma_sync_single_range_for_cpu(pool->p.dev, in page_pool_dma_sync_for_cpu()
419 page_pool_get_dma_dir(pool)); in page_pool_dma_sync_for_cpu()
429 if (unlikely(pool->p.nid != new_nid)) in page_pool_nid_changed()
[all …]
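For completeness, the frag-aware entry points shown above can also be driven without touching struct page at all. A sketch assuming the page_pool_dev_alloc_va()/page_pool_free_va() pair, as in recent kernels:

```c
#include <net/page_pool/helpers.h>

static void *my_copy_buf_get(struct page_pool *pool, unsigned int *size)
{
	/* Wraps page_pool_alloc() and returns a kernel VA; internally
	 * does the frag_offset bookkeeping shown in the hits above. */
	return page_pool_dev_alloc_va(pool, size);
}

static void my_copy_buf_put(struct page_pool *pool, void *va)
{
	/* allow_direct=true is safe only from the pool's NAPI context */
	page_pool_free_va(pool, va, true);
}
```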
