
Searched refs:pool (Results 1 – 25 of 596) sorted by relevance


/Linux-v5.15/net/core/
page_pool.c
29 static int page_pool_init(struct page_pool *pool, in page_pool_init() argument
34 memcpy(&pool->p, params, sizeof(pool->p)); in page_pool_init()
37 if (pool->p.flags & ~(PP_FLAG_ALL)) in page_pool_init()
40 if (pool->p.pool_size) in page_pool_init()
41 ring_qsize = pool->p.pool_size; in page_pool_init()
51 if (pool->p.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
52 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && in page_pool_init()
53 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) in page_pool_init()
57 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
61 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
[all …]
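
For context on the parameters that page_pool_init() validates above, here is a minimal sketch, assuming the Linux 5.15 page_pool API, of how a driver might fill struct page_pool_params before calling page_pool_create(). The device pointer, ring size, and NUMA node are placeholders, not values taken from page_pool.c.

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Hypothetical RX-ring setup helper; "rx_dev" and the sizes are placeholders. */
static struct page_pool *rx_page_pool_create(struct device *rx_dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP,   /* let the pool DMA-map its pages */
		.order     = 0,                 /* order-0 pages */
		.pool_size = 1024,              /* becomes ring_qsize in page_pool_init() */
		.nid       = nid,
		.dev       = rx_dev,
		.dma_dir   = DMA_FROM_DEVICE,   /* must be FROM_DEVICE or BIDIRECTIONAL */
	};

	/* Returns an ERR_PTR() on invalid flags or failed allocation. */
	return page_pool_create(&pp_params);
}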
/Linux-v5.15/net/xdp/
xsk_buff_pool.c
11 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk() argument
30 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
32 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
35 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy() argument
37 if (!pool) in xp_destroy()
40 kvfree(pool->heads); in xp_destroy()
[all …]
/Linux-v5.15/mm/
mempool.c
25 static void poison_error(mempool_t *pool, void *element, size_t size, in poison_error() argument
28 const int nr = pool->curr_nr; in poison_error()
34 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
42 static void __check_element(mempool_t *pool, void *element, size_t size) in __check_element() argument
51 poison_error(pool, element, size, i); in __check_element()
58 static void check_element(mempool_t *pool, void *element) in check_element() argument
61 if (pool->free == mempool_free_slab || pool->free == mempool_kfree) { in check_element()
62 __check_element(pool, element, ksize(element)); in check_element()
63 } else if (pool->free == mempool_free_pages) { in check_element()
65 int order = (int)(long)pool->pool_data; in check_element()
[all …]
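
The check_element()/poison_error() paths above only ever see elements that the pool's owner allocated and returned, so as a reminder of that owner-side API, here is a minimal sketch assuming the stock kmalloc-backed mempool constructor. The reserve count and object size are arbitrary placeholders.

#include <linux/mempool.h>
#include <linux/slab.h>

static int mempool_demo(void)
{
	mempool_t *pool;
	void *obj;

	/* Keep a reserve of 16 objects of 256 bytes for use under memory pressure. */
	pool = mempool_create_kmalloc_pool(16, 256);
	if (!pool)
		return -ENOMEM;

	obj = mempool_alloc(pool, GFP_KERNEL);  /* falls back to the reserve if kmalloc fails */
	if (obj)
		mempool_free(obj, pool);        /* refills the reserve before truly freeing */

	mempool_destroy(pool);
	return 0;
}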
zbud.c
78 int (*evict)(struct zbud_pool *pool, unsigned long handle);
222 struct zbud_pool *pool; in zbud_create_pool() local
225 pool = kzalloc(sizeof(struct zbud_pool), gfp); in zbud_create_pool()
226 if (!pool) in zbud_create_pool()
228 spin_lock_init(&pool->lock); in zbud_create_pool()
230 INIT_LIST_HEAD(&pool->unbuddied[i]); in zbud_create_pool()
231 INIT_LIST_HEAD(&pool->buddied); in zbud_create_pool()
232 INIT_LIST_HEAD(&pool->lru); in zbud_create_pool()
233 pool->pages_nr = 0; in zbud_create_pool()
234 pool->ops = ops; in zbud_create_pool()
[all …]
dmapool.c
71 struct dma_pool *pool; in pools_show() local
81 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
85 spin_lock_irq(&pool->lock); in pools_show()
86 list_for_each_entry(page, &pool->page_list, page_list) { in pools_show()
90 spin_unlock_irq(&pool->lock); in pools_show()
94 pool->name, blocks, in pools_show()
95 pages * (pool->allocation / pool->size), in pools_show()
96 pool->size, pages); in pools_show()
203 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
206 unsigned int next_boundary = pool->boundary; in pool_initialise_page()
[all …]
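
As a hedged sketch of the create/alloc/free cycle whose per-pool statistics pools_show() reports above: the device pointer, block size, and alignment below are placeholders, not values from dmapool.c.

#include <linux/dmapool.h>
#include <linux/gfp.h>

static int dma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 64-byte blocks, 64-byte aligned, no boundary-crossing restriction. */
	pool = dma_pool_create("demo_pool", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);  /* dma receives the bus address */
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);

	dma_pool_destroy(pool);
	return 0;
}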
z3fold.c
76 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
93 unsigned long pool; /* back link */ member
121 struct z3fold_pool *pool; member
212 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, in alloc_slots() argument
217 slots = kmem_cache_zalloc(pool->c_handle, in alloc_slots()
223 slots->pool = (unsigned long)pool; in alloc_slots()
232 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
310 if (test_bit(HANDLES_NOFREE, &slots->pool)) { in free_handle()
328 struct z3fold_pool *pool = slots_to_pool(slots); in free_handle() local
332 kmem_cache_free(pool->c_handle, slots); in free_handle()
[all …]
zswap.c
176 struct zswap_pool *pool; member
223 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
224 static int zswap_pool_get(struct zswap_pool *pool);
225 static void zswap_pool_put(struct zswap_pool *pool);
246 struct zswap_pool *pool; in zswap_update_total_size() local
251 list_for_each_entry_rcu(pool, &zswap_pools, list) in zswap_update_total_size()
252 total += zpool_get_total_size(pool->zpool); in zswap_update_total_size()
355 zpool_free(entry->pool->zpool, entry->handle); in zswap_free_entry()
356 zswap_pool_put(entry->pool); in zswap_free_entry()
447 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); in zswap_cpu_comp_prepare() local
[all …]
/Linux-v5.15/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
27 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
29 if (!pool) in k3_cppi_desc_pool_destroy()
32 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
34 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
36 if (pool->cpumem) in k3_cppi_desc_pool_destroy()
37 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
38 pool->dma_addr); in k3_cppi_desc_pool_destroy()
40 gen_pool_destroy(pool->gen_pool); /* frees pool->name */ in k3_cppi_desc_pool_destroy()
48 struct k3_cppi_desc_pool *pool; in k3_cppi_desc_pool_create_name() local
[all …]
/Linux-v5.15/drivers/md/
dm-thin.c
229 struct pool { struct
289 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
291 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
293 return pool->pf.mode; in get_pool_mode()
296 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
306 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
309 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
315 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
317 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
326 struct pool *pool; member
[all …]
/Linux-v5.15/sound/core/seq/
seq_memory.c
22 static inline int snd_seq_pool_available(struct snd_seq_pool *pool) in snd_seq_pool_available() argument
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
27 static inline int snd_seq_output_ok(struct snd_seq_pool *pool) in snd_seq_output_ok() argument
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
165 static inline void free_cell(struct snd_seq_pool *pool, in free_cell() argument
168 cell->next = pool->free; in free_cell()
169 pool->free = cell; in free_cell()
170 atomic_dec(&pool->counter); in free_cell()
176 struct snd_seq_pool *pool; in snd_seq_cell_free() local
180 pool = cell->pool; in snd_seq_cell_free()
[all …]
/Linux-v5.15/drivers/infiniband/sw/rxe/
rxe_pool.c
89 static inline const char *pool_name(struct rxe_pool *pool) in pool_name() argument
91 return rxe_type_info[pool->type].name; in pool_name()
94 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) in rxe_pool_init_index() argument
99 if ((max - min + 1) < pool->max_elem) { in rxe_pool_init_index()
105 pool->index.max_index = max; in rxe_pool_init_index()
106 pool->index.min_index = min; in rxe_pool_init_index()
109 pool->index.table = kmalloc(size, GFP_KERNEL); in rxe_pool_init_index()
110 if (!pool->index.table) { in rxe_pool_init_index()
115 pool->index.table_size = size; in rxe_pool_init_index()
116 bitmap_zero(pool->index.table, max - min + 1); in rxe_pool_init_index()
[all …]
/Linux-v5.15/include/net/
xdp_sock_drv.h
14 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
15 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
16 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
17 void xsk_tx_release(struct xsk_buff_pool *pool);
20 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
21 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
22 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
23 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
24 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
26 static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool) in xsk_pool_get_headroom() argument
[all …]
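
As a rough illustration of how a zero-copy driver consumes the TX-side declarations above, here is a hedged sketch of a transmit loop; the "post to hardware" step is only a comment, the budget is arbitrary, and both functions are hypothetical driver hooks rather than anything from xdp_sock_drv.h.

#include <net/xdp_sock_drv.h>

/* Hypothetical hook: pull up to "budget" descriptors off the XSK TX ring. */
static void demo_xsk_tx(struct xsk_buff_pool *pool, unsigned int budget)
{
	struct xdp_desc desc;
	unsigned int sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		/* placeholder: a real driver programs dma and desc.len into its TX ring */
		(void)dma;
		sent++;
	}

	if (sent)
		xsk_tx_release(pool);  /* lets user space see the consumed TX entries */
}

/* Hypothetical completion hook, called once the hardware has sent "done" frames. */
static void demo_xsk_tx_done(struct xsk_buff_pool *pool, unsigned int done)
{
	xsk_tx_completed(pool, done);  /* credits "done" entries to the completion ring */
}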
page_pool.h
137 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
139 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) in page_pool_dev_alloc_pages() argument
143 return page_pool_alloc_pages(pool, gfp); in page_pool_dev_alloc_pages()
146 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
149 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, in page_pool_dev_alloc_frag() argument
155 return page_pool_alloc_frag(pool, offset, size, gfp); in page_pool_dev_alloc_frag()
162 inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) in page_pool_get_dma_dir() argument
164 return pool->p.dma_dir; in page_pool_get_dma_dir()
172 void page_pool_destroy(struct page_pool *pool);
173 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
[all …]
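
To put the allocation helpers declared above in context, here is a minimal sketch of an RX-side allocate-and-return cycle; it assumes a pool created roughly as in the earlier page_pool_params sketch and is not code from page_pool.h.

#include <net/page_pool.h>

static int rx_alloc_demo(struct page_pool *pool)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(pool);  /* GFP choice comes from the inline above */
	if (!page)
		return -ENOMEM;

	/* A real driver would attach the page to an skb or xdp_buff first. */
	page_pool_put_full_page(pool, page, false);  /* false: not called from NAPI context */
	return 0;
}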
/Linux-v5.15/net/ceph/
msgpool.c
14 struct ceph_msgpool *pool = arg; in msgpool_alloc() local
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
20 dout("msgpool_alloc %s failed\n", pool->name); in msgpool_alloc()
22 dout("msgpool_alloc %s %p\n", pool->name, msg); in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
30 struct ceph_msgpool *pool = arg; in msgpool_free() local
33 dout("msgpool_release %s %p\n", pool->name, msg); in msgpool_free()
34 msg->pool = NULL; in msgpool_free()
38 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, in ceph_msgpool_init() argument
43 pool->type = type; in ceph_msgpool_init()
[all …]
/Linux-v5.15/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
rmgr_vbuf.c
128 int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool) in ia_css_rmgr_init_vbuf() argument
134 assert(pool); in ia_css_rmgr_init_vbuf()
135 if (!pool) in ia_css_rmgr_init_vbuf()
138 if (pool->recycle && pool->size) { in ia_css_rmgr_init_vbuf()
142 pool->size; in ia_css_rmgr_init_vbuf()
143 pool->handles = kvmalloc(bytes_needed, GFP_KERNEL); in ia_css_rmgr_init_vbuf()
144 if (pool->handles) in ia_css_rmgr_init_vbuf()
145 memset(pool->handles, 0, bytes_needed); in ia_css_rmgr_init_vbuf()
150 pool->size = 0; in ia_css_rmgr_init_vbuf()
151 pool->handles = NULL; in ia_css_rmgr_init_vbuf()
[all …]
/Linux-v5.15/drivers/gpu/drm/amd/display/dc/dce80/
dce80_resource.c
801 static void dce80_resource_destruct(struct dce110_resource_pool *pool) in dce80_resource_destruct() argument
805 for (i = 0; i < pool->base.pipe_count; i++) { in dce80_resource_destruct()
806 if (pool->base.opps[i] != NULL) in dce80_resource_destruct()
807 dce110_opp_destroy(&pool->base.opps[i]); in dce80_resource_destruct()
809 if (pool->base.transforms[i] != NULL) in dce80_resource_destruct()
810 dce80_transform_destroy(&pool->base.transforms[i]); in dce80_resource_destruct()
812 if (pool->base.ipps[i] != NULL) in dce80_resource_destruct()
813 dce_ipp_destroy(&pool->base.ipps[i]); in dce80_resource_destruct()
815 if (pool->base.mis[i] != NULL) { in dce80_resource_destruct()
816 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce80_resource_destruct()
[all …]
/Linux-v5.15/drivers/gpu/drm/amd/display/dc/dce60/
dce60_resource.c
796 static void dce60_resource_destruct(struct dce110_resource_pool *pool) in dce60_resource_destruct() argument
800 for (i = 0; i < pool->base.pipe_count; i++) { in dce60_resource_destruct()
801 if (pool->base.opps[i] != NULL) in dce60_resource_destruct()
802 dce110_opp_destroy(&pool->base.opps[i]); in dce60_resource_destruct()
804 if (pool->base.transforms[i] != NULL) in dce60_resource_destruct()
805 dce60_transform_destroy(&pool->base.transforms[i]); in dce60_resource_destruct()
807 if (pool->base.ipps[i] != NULL) in dce60_resource_destruct()
808 dce_ipp_destroy(&pool->base.ipps[i]); in dce60_resource_destruct()
810 if (pool->base.mis[i] != NULL) { in dce60_resource_destruct()
811 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce60_resource_destruct()
[all …]
/Linux-v5.15/net/rds/
ib_rdma.c
198 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) in rds_ib_reuse_mr() argument
204 spin_lock_irqsave(&pool->clean_lock, flags); in rds_ib_reuse_mr()
205 ret = llist_del_first(&pool->clean_list); in rds_ib_reuse_mr()
206 spin_unlock_irqrestore(&pool->clean_lock, flags); in rds_ib_reuse_mr()
209 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_reuse_mr()
275 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
277 atomic_sub(pinned, &pool->free_pinned); in rds_ib_teardown_mr()
281 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) in rds_ib_flush_goal() argument
285 item_count = atomic_read(&pool->item_count); in rds_ib_flush_goal()
342 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, in rds_ib_flush_mr_pool() argument
[all …]
/Linux-v5.15/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
12 static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool) in to_gt() argument
14 return container_of(pool, struct intel_gt, buffer_pool); in to_gt()
18 bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz) in bucket_for_size() argument
28 if (n >= ARRAY_SIZE(pool->cache_list)) in bucket_for_size()
29 n = ARRAY_SIZE(pool->cache_list) - 1; in bucket_for_size()
31 return &pool->cache_list[n]; in bucket_for_size()
41 static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep) in pool_free_older_than() argument
48 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { in pool_free_older_than()
49 struct list_head *list = &pool->cache_list[n]; in pool_free_older_than()
54 if (spin_trylock_irq(&pool->lock)) { in pool_free_older_than()
[all …]
/Linux-v5.15/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool->active_entries_count); in mlxsw_sp_counter_pool_occ_get()
124 struct mlxsw_sp_counter_pool *pool; in mlxsw_sp_counter_pool_init() local
[all …]
/Linux-v5.15/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
33 static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, in __find_buddy_nocheck() argument
45 if (addr < pool->range_start || addr >= pool->range_end) in __find_buddy_nocheck()
52 static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool, in __find_buddy_avail() argument
56 struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail()
93 static void __hyp_attach_page(struct hyp_pool *pool, in __hyp_attach_page() argument
108 for (; (order + 1) < pool->max_order; order++) { in __hyp_attach_page()
109 buddy = __find_buddy_avail(pool, p, order); in __hyp_attach_page()
121 page_add_to_list(p, &pool->free_area[order]); in __hyp_attach_page()
124 static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool, in __hyp_extract_page() argument
139 buddy = __find_buddy_nocheck(pool, p, p->order); in __hyp_extract_page()
[all …]
/Linux-v5.15/include/linux/
genalloc.h
52 void *data, struct gen_pool *pool,
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, in gen_pool_add_virt() argument
104 return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); in gen_pool_add_virt()
119 static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, in gen_pool_add() argument
122 return gen_pool_add_virt(pool, addr, -1, size, nid); in gen_pool_add()
125 unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
128 static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool, in gen_pool_alloc_owner() argument
131 return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data, in gen_pool_alloc_owner()
135 static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool, in gen_pool_alloc_algo() argument
[all …]
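
A hedged sketch of the usual genalloc flow behind the inlines above: create a pool, hand it a region, then carve allocations out of it. The backing buffer, its size, and the allocation granularity are placeholders, not values from genalloc.h.

#include <linux/genalloc.h>
#include <linux/numa.h>
#include <linux/slab.h>

static int genalloc_demo(void)
{
	struct gen_pool *pool;
	unsigned long chunk;
	void *region;
	int ret = 0;

	/* min_alloc_order = 5: allocations are rounded up to 32-byte granules. */
	pool = gen_pool_create(5, NUMA_NO_NODE);
	if (!pool)
		return -ENOMEM;

	region = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto out_destroy;
	}

	/* Let the pool manage the buffer's virtual address range (phys = -1, as above). */
	ret = gen_pool_add(pool, (unsigned long)region, PAGE_SIZE, NUMA_NO_NODE);
	if (ret)
		goto out_free;

	chunk = gen_pool_alloc(pool, 64);	/* returns 0 on failure */
	if (chunk)
		gen_pool_free(pool, chunk, 64);

out_free:
	kfree(region);
out_destroy:
	gen_pool_destroy(pool);
	return ret;
}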
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
62 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) in dr_icm_pool_mr_create() argument
64 struct mlx5_core_dev *mdev = pool->dmn->mdev; in dr_icm_pool_mr_create()
74 icm_mr->dmn = pool->dmn; in dr_icm_pool_mr_create()
76 icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in dr_icm_pool_mr_create()
77 pool->icm_type); in dr_icm_pool_mr_create()
79 if (pool->icm_type == DR_ICM_TYPE_STE) { in dr_icm_pool_mr_create()
93 mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err); in dr_icm_pool_mr_create()
98 err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn, in dr_icm_pool_mr_create()
104 mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err); in dr_icm_pool_mr_create()
111 mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n", in dr_icm_pool_mr_create()
[all …]
/Linux-v5.15/lib/
genalloc.c
155 struct gen_pool *pool; in gen_pool_create() local
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); in gen_pool_create()
158 if (pool != NULL) { in gen_pool_create()
159 spin_lock_init(&pool->lock); in gen_pool_create()
160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
161 pool->min_alloc_order = min_alloc_order; in gen_pool_create()
162 pool->algo = gen_pool_first_fit; in gen_pool_create()
163 pool->data = NULL; in gen_pool_create()
164 pool->name = NULL; in gen_pool_create()
166 return pool; in gen_pool_create()
[all …]
/Linux-v5.15/include/trace/events/
page_pool.h
16 TP_PROTO(const struct page_pool *pool,
19 TP_ARGS(pool, inflight, hold, release),
22 __field(const struct page_pool *, pool)
30 __entry->pool = pool;
34 __entry->cnt = pool->destroy_cnt;
38 __entry->pool, __entry->inflight, __entry->hold,
44 TP_PROTO(const struct page_pool *pool,
47 TP_ARGS(pool, page, release),
50 __field(const struct page_pool *, pool)
57 __entry->pool = pool;
[all …]
