
Searched full:pool (Results 1 – 25 of 1382) sorted by relevance


/Linux-v6.1/net/xdp/
xsk_buff_pool.c
11 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk() argument
30 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
32 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
35 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy() argument
37 if (!pool) in xp_destroy()
40 kvfree(pool->tx_descs); in xp_destroy()
[all …]
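
xp_add_xsk() and xp_del_xsk() above are the writer side of an RCU-protected list: mutations are serialized by an irq-safe spinlock, while readers walk xsk_tx_list under rcu_read_lock() without taking the lock at all. A minimal sketch of the same pattern, with a made-up demo_pool standing in for xsk_buff_pool:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_pool {
        struct list_head entries;       /* read via list_for_each_entry_rcu() */
        spinlock_t lock;                /* serializes writers only */
};

static void demo_add(struct demo_pool *p, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        list_add_rcu(node, &p->entries);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void demo_del(struct demo_pool *p, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(&p->lock, flags);
        /* freeing the node afterwards still requires a grace period,
         * e.g. synchronize_rcu() or kfree_rcu() */
}
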
/Linux-v6.1/net/core/
page_pool.c
32 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) argument
34 #define recycle_stat_inc(pool, __stat) \ argument
36 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
40 #define recycle_stat_add(pool, __stat, val) \ argument
42 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
60 bool page_pool_get_stats(struct page_pool *pool, in page_pool_get_stats() argument
69 stats->alloc_stats.fast += pool->alloc_stats.fast; in page_pool_get_stats()
70 stats->alloc_stats.slow += pool->alloc_stats.slow; in page_pool_get_stats()
71 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; in page_pool_get_stats()
72 stats->alloc_stats.empty += pool->alloc_stats.empty; in page_pool_get_stats()
[all …]
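
These stat macros feed page_pool_get_stats(), which a driver can call to sum a pool's counters into a caller-provided struct. A minimal sketch of that driver side, assuming CONFIG_PAGE_POOL_STATS=y and a driver that already owns a struct page_pool (the name pp and the pr_info format are illustrative):

#include <net/page_pool.h>

static void demo_collect_stats(struct page_pool *pp)
{
        struct page_pool_stats stats = { };

        /* Accumulates into *stats, so one struct can sum several pools;
         * returns false when stats support is not compiled in. */
        if (!page_pool_get_stats(pp, &stats))
                return;

        pr_info("fast=%llu slow=%llu empty=%llu\n",
                stats.alloc_stats.fast, stats.alloc_stats.slow,
                stats.alloc_stats.empty);
}
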
/Linux-v6.1/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
2 /* TI K3 CPPI5 descriptors pool API
15 #include "k3-cppi-desc-pool.h"
27 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
29 if (!pool) in k3_cppi_desc_pool_destroy()
32 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
34 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
36 if (pool->cpumem) in k3_cppi_desc_pool_destroy()
37 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
38 pool->dma_addr); in k3_cppi_desc_pool_destroy()
[all …]
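
A hedged sketch of the life cycle this file implements, assuming the create_name(dev, num_desc, desc_size, name) argument order declared in k3-cppi-desc-pool.h for v6.1; the descriptor count of 128 and size of 64 bytes are arbitrary:

#include <linux/device.h>
#include <linux/err.h>
#include "k3-cppi-desc-pool.h"

static int demo_desc_pool(struct device *dev)
{
        struct k3_cppi_desc_pool *pool;
        void *desc;

        pool = k3_cppi_desc_pool_create_name(dev, 128, 64, "demo-tx");
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        desc = k3_cppi_desc_pool_alloc(pool);
        if (desc)
                k3_cppi_desc_pool_free(pool, desc);

        /* WARNs (see above) if descriptors are still outstanding, then
         * frees the coherent backing memory. */
        k3_cppi_desc_pool_destroy(pool);
        return 0;
}
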
/Linux-v6.1/mm/
mempool.c
5 * memory buffer pool support. Such pools are mostly used
24 static void poison_error(mempool_t *pool, void *element, size_t size, in poison_error() argument
27 const int nr = pool->curr_nr; in poison_error()
33 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
41 static void __check_element(mempool_t *pool, void *element, size_t size) in __check_element() argument
50 poison_error(pool, element, size, i); in __check_element()
57 static void check_element(mempool_t *pool, void *element) in check_element() argument
60 if (pool->free == mempool_free_slab || pool->free == mempool_kfree) { in check_element()
61 __check_element(pool, element, ksize(element)); in check_element()
62 } else if (pool->free == mempool_free_pages) { in check_element()
[all …]
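
For context, the usual mempool life cycle around the poison-checking code above; a minimal sketch assuming a slab cache for the element type (the demo_* names are made up). The pool pre-allocates min_nr elements so allocations on a reclaim path can always make progress:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static int demo_init(void)
{
        demo_cache = kmem_cache_create("demo", 256, 0, 0, NULL);
        if (!demo_cache)
                return -ENOMEM;

        demo_pool = mempool_create_slab_pool(4, demo_cache);    /* min_nr = 4 */
        if (!demo_pool) {
                kmem_cache_destroy(demo_cache);
                return -ENOMEM;
        }
        return 0;
}

static void demo_use(void)
{
        void *elem = mempool_alloc(demo_pool, GFP_NOIO);

        /* ... use elem; with GFP_NOIO this cannot fail, it sleeps until
         * an element is returned to the pool ... */
        mempool_free(elem, demo_pool);
}
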
dmapool.c
3 * DMA Pool allocator
14 * The current design of this allocator is fairly simple. The pool is
43 struct dma_pool { /* the pool */
71 struct dma_pool *pool; in pools_show() local
81 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
85 spin_lock_irq(&pool->lock); in pools_show()
86 list_for_each_entry(page, &pool->page_list, page_list) { in pools_show()
90 spin_unlock_irq(&pool->lock); in pools_show()
92 /* per-pool info, no real statistics yet */ in pools_show()
94 pool->name, blocks, in pools_show()
[all …]
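
The pools_show() debug attribute above walks pools created through the standard dma_pool API. A minimal sketch of that life cycle (the 64-byte block size and 8-byte alignment are placeholders):

#include <linux/dmapool.h>

static int demo_dma_pool(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        pool = dma_pool_create("demo", dev, 64, 8, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma); /* dma = device-side address */
        if (vaddr)
                dma_pool_free(pool, vaddr, dma);

        dma_pool_destroy(pool);
        return 0;
}
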
zbud.c
62 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
66 * 63 freelists per pool.
78 int (*evict)(struct zbud_pool *pool, unsigned long handle);
82 * struct zbud_pool - stores metadata for each zbud pool
83 * @lock: protects all pool fields and first|last_chunk fields of any
84 * zbud page in the pool
92 * @pages_nr: number of zbud pages in the pool.
94 * pool creation time.
98 * This structure is allocated at pool creation time and maintains metadata
99 * pertaining to a particular zbud pool.
[all …]
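
The chunk arithmetic that comment describes, spelled out for a 4 KiB page (PAGE_SHIFT = 12); these restate the macros in zbud.c. One chunk is taken by the zbud page header, leaving up to 63 usable chunks, hence the 63 unbuddied freelists:

#define NCHUNKS_ORDER   6
#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)    /* 12 - 6 = 6  */
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)              /* 64 bytes    */
#define NCHUNKS         (PAGE_SIZE >> CHUNK_SHIFT)      /* 64 per page */
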
z3fold.c
47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
52 * be 63, or 62, respectively, freelists per pool.
72 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
89 unsigned long pool; /* back link */ member
98 * pool
103 * @pool: pointer to the containing pool
117 struct z3fold_pool *pool; member
129 * struct z3fold_pool - stores metadata for each z3fold pool
130 * @name: pool name
131 * @lock: protects pool unbuddied/lru lists
[all …]
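
Both zbud and z3fold are normally consumed through the zpool abstraction (this is how zswap drives them) rather than called directly. A hedged sketch of that flow as of v6.1, with "z3fold" equally replaceable by "zbud" and error handling kept minimal:

#include <linux/zpool.h>

static void demo_zpool(void)
{
        struct zpool *zp;
        unsigned long handle;
        void *dst;

        zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL, NULL);
        if (!zp)
                return;

        if (!zpool_malloc(zp, 300, GFP_KERNEL, &handle)) {      /* 0 on success */
                dst = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
                /* ... copy a compressed object into dst ... */
                zpool_unmap_handle(zp, handle);
                zpool_free(zp, handle);
        }
        zpool_destroy_pool(zp);
}
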
/Linux-v6.1/drivers/md/
dm-thin.c
41 * The block size of the device holding pool data must be
191 * A pool device ties together a metadata device and a data device. It
198 * The pool runs in various modes. Ordered in degraded order for comparisons.
229 struct pool { struct
231 struct dm_target *ti; /* Only set if a pool target is bound */ argument
287 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
289 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
291 return pool->pf.mode; in get_pool_mode()
294 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
304 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
[all …]
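
"Ordered in degraded order" is what makes get_pool_mode() comparisons work: modes compare numerically, so a single >= test answers "is the pool at least this degraded?". The enum below follows dm-thin.c's ordering as I read it in v6.1; the demo_degraded() helper is a made-up illustration of the idiom:

#include <linux/types.h>

enum pool_mode {
        PM_WRITE,                 /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,     /* metadata ok, data may not be allocated */
        PM_OUT_OF_METADATA_SPACE, /* like READ_ONLY, may recover on metadata resize */
        PM_READ_ONLY,             /* metadata may not be changed */
        PM_FAIL,                  /* all I/O fails */
};

static bool demo_degraded(enum pool_mode mode)
{
        return mode >= PM_READ_ONLY;    /* read-only or failed */
}
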
/Linux-v6.1/sound/core/seq/
seq_memory.c
22 static inline int snd_seq_pool_available(struct snd_seq_pool *pool) in snd_seq_pool_available() argument
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
27 static inline int snd_seq_output_ok(struct snd_seq_pool *pool) in snd_seq_output_ok() argument
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
168 static inline void free_cell(struct snd_seq_pool *pool, in free_cell() argument
171 cell->next = pool->free; in free_cell()
172 pool->free = cell; in free_cell()
173 atomic_dec(&pool->counter); in free_cell()
179 struct snd_seq_pool *pool; in snd_seq_cell_free() local
183 pool = cell->pool; in snd_seq_cell_free()
[all …]
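
free_cell() above is a plain LIFO free list plus an atomic in-use counter: a returned cell becomes the new head and the old head becomes its ->next. A stripped-down model of the same idea (the pool spinlock that seq_memory.c actually takes around these operations is omitted for brevity):

#include <linux/atomic.h>

struct demo_cell {
        struct demo_cell *next;
};

struct demo_cell_pool {
        struct demo_cell *free; /* head of the free list */
        atomic_t counter;       /* cells currently handed out */
};

static void demo_free_cell(struct demo_cell_pool *pool, struct demo_cell *cell)
{
        cell->next = pool->free;
        pool->free = cell;
        atomic_dec(&pool->counter);
}

static struct demo_cell *demo_alloc_cell(struct demo_cell_pool *pool)
{
        struct demo_cell *cell = pool->free;

        if (cell) {
                pool->free = cell->next;
                atomic_inc(&pool->counter);
        }
        return cell;
}
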
seq_memory.h
17 struct snd_seq_pool *pool; /* used pool */ member
21 /* design note: the pool is a contiguous block of memory; if we dynamically
22 want to add additional cells to the pool, they are better stored in another
23 pool, as we need to know the base address of the pool when releasing
30 int total_elements; /* pool size actually allocated */
33 int size; /* pool size to be allocated */
47 /* Pool lock */
53 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
58 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) in snd_seq_unused_cells() argument
60 return pool ? pool->total_elements - atomic_read(&pool->counter) : 0; in snd_seq_unused_cells()
[all …]
/Linux-v6.1/include/net/
xdp_sock_drv.h
17 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
18 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
19 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
20 void xsk_tx_release(struct xsk_buff_pool *pool);
23 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
24 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
25 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
26 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
27 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
29 static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool) in xsk_pool_get_headroom() argument
[all …]
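
These hooks exist for a driver's zero-copy AF_XDP TX path: pull descriptors off the TX ring, post them to hardware, and credit completions back later. A hedged sketch of that split (posting to hardware is elided; real drivers also translate desc.addr with xsk_buff_raw_get_dma(), and the demo_* names are made up):

#include <net/xdp_sock_drv.h>

static void demo_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
{
        struct xdp_desc desc;
        u32 sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
                /* ... post desc to the hardware TX queue here ... */
                sent++;
        }
        if (sent)
                xsk_tx_release(pool);   /* publish the consumed TX ring slots */
}

/* Later, from the TX completion interrupt, once the NIC has actually
 * sent `done` frames: */
static void demo_xsk_clean(struct xsk_buff_pool *pool, u32 done)
{
        xsk_tx_completed(pool, done);   /* fills the completion ring */
}
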
xsk_buff_pool.h
26 struct xsk_buff_pool *pool; member
58 /* For performance reasons, each buff pool has its own array of dma_pages
96 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
98 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
100 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
101 void xp_destroy(struct xsk_buff_pool *pool);
102 void xp_get_pool(struct xsk_buff_pool *pool);
103 bool xp_put_pool(struct xsk_buff_pool *pool);
104 void xp_clear_dev(struct xsk_buff_pool *pool);
105 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
[all …]
/Linux-v6.1/net/ceph/
msgpool.c
14 struct ceph_msgpool *pool = arg; in msgpool_alloc() local
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
20 dout("msgpool_alloc %s failed\n", pool->name); in msgpool_alloc()
22 dout("msgpool_alloc %s %p\n", pool->name, msg); in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
30 struct ceph_msgpool *pool = arg; in msgpool_free() local
33 dout("msgpool_release %s %p\n", pool->name, msg); in msgpool_free()
34 msg->pool = NULL; in msgpool_free()
38 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, in ceph_msgpool_init() argument
43 pool->type = type; in ceph_msgpool_init()
[all …]
/Linux-v6.1/lib/
genalloc.c
16 * available. If new memory is added to the pool a lock has to be
146 * gen_pool_create - create a new special memory pool
148 * @nid: node id of the node the pool structure should be allocated on, or -1
150 * Create a new special memory pool that can be used to manage special purpose
155 struct gen_pool *pool; in gen_pool_create() local
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); in gen_pool_create()
158 if (pool != NULL) { in gen_pool_create()
159 spin_lock_init(&pool->lock); in gen_pool_create()
160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
161 pool->min_alloc_order = min_alloc_order; in gen_pool_create()
[all …]
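
gen_pool_create() is the entry point of the genalloc API, typically used to hand out sub-allocations from a region the page allocator does not manage (SRAM, on-chip memory). A minimal sketch; the SRAM address and size parameters are placeholders:

#include <linux/genalloc.h>

static int demo_gen_pool(unsigned long sram_virt, size_t sram_size)
{
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(5, -1); /* 2^5 = 32-byte granules, any node */
        if (!pool)
                return -ENOMEM;

        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        addr = gen_pool_alloc(pool, 256);       /* 0 means failure */
        if (addr)
                gen_pool_free(pool, addr, 256);

        gen_pool_destroy(pool); /* expects no outstanding allocations */
        return 0;
}
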
/Linux-v6.1/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
rmgr_vbuf.c
31 * @brief VBUF resource pool - refpool
36 * @brief VBUF resource pool - writepool
43 * @brief VBUF resource pool - hmmbufferpool
124 * @brief Initialize the resource pool (host, vbuf)
126 * @param pool The pointer to the pool
128 int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool) in ia_css_rmgr_init_vbuf() argument
134 assert(pool); in ia_css_rmgr_init_vbuf()
135 if (!pool) in ia_css_rmgr_init_vbuf()
137 /* initialize the recycle pool if used */ in ia_css_rmgr_init_vbuf()
138 if (pool->recycle && pool->size) { in ia_css_rmgr_init_vbuf()
[all …]
/Linux-v6.1/drivers/staging/octeon/
ethernet-mem.c
17 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
18 * @pool: Pool to allocate an skbuff for
19 * @size: Size of the buffer needed for the pool
24 static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) in cvm_oct_fill_hw_skbuff() argument
35 cvmx_fpa_free(skb->data, pool, size / 128); in cvm_oct_fill_hw_skbuff()
42 * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
43 * @pool: Pool to free skbuffs from
44 * @size: Size of the buffer needed for the pool
47 static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) in cvm_oct_free_hw_skbuff() argument
52 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
[all …]
/Linux-v6.1/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
16 objects, pool, tcbind and portpool. Provide an interface to get random
18 1. Pool:
22 - random pool number
30 for pool in pools:
31 self._pools.append(pool)
47 def _get_th(self, pool): argument
50 if pool["thtype"] == "dynamic":
58 for pool in self._pools:
59 if pool["type"] == "ingress":
60 ing_pools.append(pool)
[all …]
/Linux-v6.1/include/linux/
genalloc.h
16 * available. If new memory is added to the pool a lock has to be
46 * @pool: the pool being allocated from
52 void *data, struct gen_pool *pool,
56 * General purpose special memory pool descriptor.
60 struct list_head chunks; /* list of chunks in this pool */
70 * General purpose special memory pool chunk descriptor.
73 struct list_head next_chunk; /* next chunk in pool */
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, in gen_pool_add_virt() argument
104 return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); in gen_pool_add_virt()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
13 struct mutex mutex; /* protect the ICM pool and ICM buddy */
62 u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_mr_addr()
74 u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_icm_addr()
82 chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_byte_size()
91 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) in dr_icm_pool_mr_create() argument
93 struct mlx5_core_dev *mdev = pool->dmn->mdev; in dr_icm_pool_mr_create()
103 icm_mr->dmn = pool->dmn; in dr_icm_pool_mr_create()
105 icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in dr_icm_pool_mr_create()
106 pool->icm_type); in dr_icm_pool_mr_create()
108 if (pool->icm_type == DR_ICM_TYPE_STE) { in dr_icm_pool_mr_create()
[all …]
/Linux-v6.1/drivers/gpu/drm/amd/display/dc/dce80/
dce80_resource.c
800 static void dce80_resource_destruct(struct dce110_resource_pool *pool) in dce80_resource_destruct() argument
804 for (i = 0; i < pool->base.pipe_count; i++) { in dce80_resource_destruct()
805 if (pool->base.opps[i] != NULL) in dce80_resource_destruct()
806 dce110_opp_destroy(&pool->base.opps[i]); in dce80_resource_destruct()
808 if (pool->base.transforms[i] != NULL) in dce80_resource_destruct()
809 dce80_transform_destroy(&pool->base.transforms[i]); in dce80_resource_destruct()
811 if (pool->base.ipps[i] != NULL) in dce80_resource_destruct()
812 dce_ipp_destroy(&pool->base.ipps[i]); in dce80_resource_destruct()
814 if (pool->base.mis[i] != NULL) { in dce80_resource_destruct()
815 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce80_resource_destruct()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
24 spinlock_t counter_pool_lock; /* Protects counter pool allocations */
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool->active_entries_count); in mlxsw_sp_counter_pool_occ_get()
[all …]
/Linux-v6.1/drivers/gpu/drm/amd/display/dc/dce60/
dce60_resource.c
797 static void dce60_resource_destruct(struct dce110_resource_pool *pool) in dce60_resource_destruct() argument
801 for (i = 0; i < pool->base.pipe_count; i++) { in dce60_resource_destruct()
802 if (pool->base.opps[i] != NULL) in dce60_resource_destruct()
803 dce110_opp_destroy(&pool->base.opps[i]); in dce60_resource_destruct()
805 if (pool->base.transforms[i] != NULL) in dce60_resource_destruct()
806 dce60_transform_destroy(&pool->base.transforms[i]); in dce60_resource_destruct()
808 if (pool->base.ipps[i] != NULL) in dce60_resource_destruct()
809 dce_ipp_destroy(&pool->base.ipps[i]); in dce60_resource_destruct()
811 if (pool->base.mis[i] != NULL) { in dce60_resource_destruct()
812 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce60_resource_destruct()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/
irq_affinity.c
8 static void cpu_put(struct mlx5_irq_pool *pool, int cpu) in cpu_put() argument
10 pool->irqs_per_cpu[cpu]--; in cpu_put()
13 static void cpu_get(struct mlx5_irq_pool *pool, int cpu) in cpu_get() argument
15 pool->irqs_per_cpu[cpu]++; in cpu_get()
19 static int cpu_get_least_loaded(struct mlx5_irq_pool *pool, in cpu_get_least_loaded() argument
27 if (!pool->irqs_per_cpu[cpu]) { in cpu_get_least_loaded()
33 if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) in cpu_get_least_loaded()
38 mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n", in cpu_get_least_loaded()
42 pool->irqs_per_cpu[best_cpu]++; in cpu_get_least_loaded()
48 irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask) in irq_pool_request_irq() argument
[all …]
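
The allocation side of this file pairs cpu_get()/cpu_put() with a least-loaded scan: prefer a CPU in the request mask that carries no IRQs yet, otherwise take the one with the fewest, then charge it one. A stand-alone model of that scan (irqs_per_cpu mirrors the pool's per-CPU load table; the demo_ name is made up):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_least_loaded(u16 *irqs_per_cpu, const struct cpumask *req_mask)
{
        int best_cpu = -1;
        int cpu;

        for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
                if (!irqs_per_cpu[cpu]) {       /* an idle CPU wins outright */
                        best_cpu = cpu;
                        break;
                }
                if (best_cpu < 0 || irqs_per_cpu[cpu] < irqs_per_cpu[best_cpu])
                        best_cpu = cpu;
        }
        if (best_cpu < 0)
                return -ENOENT; /* no online CPU in req_mask */

        irqs_per_cpu[best_cpu]++;       /* charge the chosen CPU, as cpu_get() does */
        return best_cpu;
}
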
/Linux-v6.1/arch/mips/include/asm/octeon/
cvmx-fpa.h
31 * Interface to the hardware Free Pool Allocator.
79 * Structure describing the current state of a FPA pool.
88 /* The number of elements in the pool at creation */
101 * Return the name of the pool
103 * @pool: Pool to get the name of
106 static inline const char *cvmx_fpa_get_name(uint64_t pool) in cvmx_fpa_get_name() argument
108 return cvmx_fpa_pool_info[pool].name; in cvmx_fpa_get_name()
112 * Return the base of the pool
114 * @pool: Pool to get the base of
117 static inline void *cvmx_fpa_get_base(uint64_t pool) in cvmx_fpa_get_base() argument
[all …]
/Linux-v6.1/drivers/infiniband/sw/rxe/
rxe_pool.c
92 void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, in rxe_pool_init() argument
97 memset(pool, 0, sizeof(*pool)); in rxe_pool_init()
99 pool->rxe = rxe; in rxe_pool_init()
100 pool->name = info->name; in rxe_pool_init()
101 pool->type = type; in rxe_pool_init()
102 pool->max_elem = info->max_elem; in rxe_pool_init()
103 pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); in rxe_pool_init()
104 pool->elem_offset = info->elem_offset; in rxe_pool_init()
105 pool->cleanup = info->cleanup; in rxe_pool_init()
107 atomic_set(&pool->num_elem, 0); in rxe_pool_init()
[all …]
