/Linux-v5.15/net/xdp/
  xsk_buff_pool.c
      11  void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
      18          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
      19          list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
      20          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
      23  void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
      30          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
      32          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
      35  void xp_destroy(struct xsk_buff_pool *pool)
      37          if (!pool)
      40          kvfree(pool->heads);
      [all …]

/Linux-v5.15/drivers/net/ethernet/ti/
  k3-cppi-desc-pool.c
       2  /* TI K3 CPPI5 descriptors pool API
      15  #include "k3-cppi-desc-pool.h"
      27  void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
      29          if (!pool)
      32          WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
      34               gen_pool_size(pool->gen_pool),
      35               gen_pool_avail(pool->gen_pool));
      36          if (pool->cpumem)
      37                  dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
      38                                    pool->dma_addr);
      [all …]

/Linux-v5.15/mm/
  mempool.c
       5   * memory buffer pool support. Such pools are mostly used
      25  static void poison_error(mempool_t *pool, void *element, size_t size,
      28          const int nr = pool->curr_nr;
      34          pr_err("Mempool %p size %zu\n", pool, size);
      42  static void __check_element(mempool_t *pool, void *element, size_t size)
      51                  poison_error(pool, element, size, i);
      58  static void check_element(mempool_t *pool, void *element)
      61          if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
      62                  __check_element(pool, element, ksize(element));
      63          } else if (pool->free == mempool_free_pages) {
      [all …]

  dmapool.c
       3   * DMA Pool allocator
      14   * The current design of this allocator is fairly simple. The pool is
      43  struct dma_pool {               /* the pool */
      71          struct dma_pool *pool;
      81          list_for_each_entry(pool, &dev->dma_pools, pools) {
      85                  spin_lock_irq(&pool->lock);
      86                  list_for_each_entry(page, &pool->page_list, page_list) {
      90                  spin_unlock_irq(&pool->lock);
      92                  /* per-pool info, no real statistics yet */
      94                           pool->name, blocks,
      [all …]

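  dma_pool_create()/dma_pool_alloc() (declared in <linux/dmapool.h>) are intended for many
  small coherent-DMA buffers such as hardware descriptors. A hedged sketch of typical
  driver usage; the pool name and the 64-byte size/alignment values are placeholder
  assumptions:

    #include <linux/dmapool.h>
    #include <linux/device.h>

    static int my_desc_demo(struct device *dev)
    {
            struct dma_pool *pool;
            void *desc;
            dma_addr_t desc_dma;

            /* 64-byte descriptors, 64-byte aligned, no boundary restriction. */
            pool = dma_pool_create("my_descs", dev, 64, 64, 0);
            if (!pool)
                    return -ENOMEM;

            desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
            if (!desc) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... program desc_dma into the hardware, touch desc from the CPU ... */

            dma_pool_free(pool, desc, desc_dma);
            dma_pool_destroy(pool);
            return 0;
    }
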
  zbud.c
      62   * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
      66   * 63 freelists per pool.
      78          int (*evict)(struct zbud_pool *pool, unsigned long handle);
      82   * struct zbud_pool - stores metadata for each zbud pool
      83   * @lock:       protects all pool fields and first|last_chunk fields of any
      84   *              zbud page in the pool
      92   * @pages_nr:   number of zbud pages in the pool.
      94   *              pool creation time.
      98   * This structure is allocated at pool creation time and maintains metadata
      99   * pertaining to a particular zbud pool.
      [all …]

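  In v5.15 zbud (like z3fold below) is reached through the zpool abstraction rather than
  called directly. A rough sketch of a compressed-memory round trip through the "zbud"
  backend; the pool name, the 1024-byte size, and passing NULL ops (no evict callback)
  are illustrative assumptions:

    #include <linux/zpool.h>
    #include <linux/string.h>

    static int zbud_roundtrip(void)
    {
            struct zpool *zp;
            unsigned long handle;
            char *buf;
            int err;

            zp = zpool_create_pool("zbud", "demo_pool", GFP_KERNEL, NULL);
            if (!zp)
                    return -ENOMEM;

            /* zbud packs at most two compressed buffers ("buddies") per page. */
            err = zpool_malloc(zp, 1024, GFP_KERNEL, &handle);
            if (err)
                    goto out;

            buf = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
            memset(buf, 0, 1024);
            zpool_unmap_handle(zp, handle);

            zpool_free(zp, handle);
    out:
            zpool_destroy_pool(zp);
            return err;
    }
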
  z3fold.c
      51   * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
      56   * be 63, or 62, respectively, freelists per pool.
      76          int (*evict)(struct z3fold_pool *pool, unsigned long handle);
      93          unsigned long pool;     /* back link */
     102   *                      pool
     107   * @pool:               pointer to the containing pool
     121          struct z3fold_pool *pool;
     133   * struct z3fold_pool - stores metadata for each z3fold pool
     134   * @name:       pool name
     135   * @lock:       protects pool unbuddied/lru lists
      [all …]

/Linux-v5.15/net/core/
  page_pool.c
      29  static int page_pool_init(struct page_pool *pool,
      34          memcpy(&pool->p, params, sizeof(pool->p));
      37          if (pool->p.flags & ~(PP_FLAG_ALL))
      40          if (pool->p.pool_size)
      41                  ring_qsize = pool->p.pool_size;
      51          if (pool->p.flags & PP_FLAG_DMA_MAP) {
      52                  if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
      53                      (pool->p.dma_dir != DMA_BIDIRECTIONAL))
      57          if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
      61                  if (!(pool->p.flags & PP_FLAG_DMA_MAP))
      [all …]

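  page_pool_init() validates the page_pool_params an RX driver hands to page_pool_create()
  (declared in <net/page_pool.h>). A hedged sketch of creating and recycling pages from
  such a pool; the device pointer, the ring size of 256, and the DMA direction are assumed
  values:

    #include <net/page_pool.h>
    #include <linux/dma-mapping.h>
    #include <linux/numa.h>

    static struct page_pool *my_rx_pool_create(struct device *dev)
    {
            struct page_pool_params pp = {
                    .flags     = PP_FLAG_DMA_MAP,   /* pool maps pages for DMA */
                    .order     = 0,                 /* single pages */
                    .pool_size = 256,               /* sized to the RX ring */
                    .nid       = NUMA_NO_NODE,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
            };

            return page_pool_create(&pp);           /* ERR_PTR() on failure */
    }

    static void my_rx_refill_one(struct page_pool *pool)
    {
            struct page *page;
            dma_addr_t dma;

            page = page_pool_dev_alloc_pages(pool);
            if (!page)
                    return;

            dma = page_pool_get_dma_addr(page);
            /* ... post dma to an RX descriptor; on drop or teardown: ... */
            page_pool_put_full_page(pool, page, false);
    }
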
/Linux-v5.15/drivers/infiniband/sw/rxe/
  rxe_pool.c
      89  static inline const char *pool_name(struct rxe_pool *pool)
      91          return rxe_type_info[pool->type].name;
      94  static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
      99          if ((max - min + 1) < pool->max_elem) {
     105          pool->index.max_index = max;
     106          pool->index.min_index = min;
     109          pool->index.table = kmalloc(size, GFP_KERNEL);
     110          if (!pool->index.table) {
     115          pool->index.table_size = size;
     116          bitmap_zero(pool->index.table, max - min + 1);
      [all …]

/Linux-v5.15/drivers/md/
  dm-thin.c
      41   * The block size of the device holding pool data must be
     191   * A pool device ties together a metadata device and a data device. It
     198   * The pool runs in various modes. Ordered in degraded order for comparisons.
     229  struct pool {
     231          struct dm_target *ti;   /* Only set if a pool target is bound */
     289  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
     291  static enum pool_mode get_pool_mode(struct pool *pool)
     293          return pool->pf.mode;
     296  static void notify_of_pool_mode_change(struct pool *pool)
     306          enum pool_mode mode = get_pool_mode(pool);
      [all …]

/Linux-v5.15/sound/core/seq/
  seq_memory.c
      22  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
      24          return pool->total_elements - atomic_read(&pool->counter);
      27  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
      29          return snd_seq_pool_available(pool) >= pool->room;
     165  static inline void free_cell(struct snd_seq_pool *pool,
     168          cell->next = pool->free;
     169          pool->free = cell;
     170          atomic_dec(&pool->counter);
     176          struct snd_seq_pool *pool;
     180          pool = cell->pool;
      [all …]

  seq_memory.h
      17          struct snd_seq_pool *pool;      /* used pool */
      21  /* design note: the pool is a contiguous block of memory, if we dynamicly
      22     want to add additional cells to the pool be better store this in another
      23     pool as we need to know the base address of the pool when releasing
      30          int total_elements;     /* pool size actually allocated */
      33          int size;               /* pool size to be allocated */
      47  /* Pool lock */
      53  int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
      58  static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
      60          return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;
      [all …]

/Linux-v5.15/include/net/
  xdp_sock_drv.h
      14  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
      15  bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
      16  u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
      17  void xsk_tx_release(struct xsk_buff_pool *pool);
      20  void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
      21  void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
      22  void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
      23  void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
      24  bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
      26  static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
      [all …]

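  These are the driver-facing hooks of the AF_XDP buffer pool. A rough sketch of a
  zero-copy TX path built only from the declarations above; the hardware transmit helper,
  the budget value, and the poll/interrupt split are hypothetical:

    #include <net/xdp_sock_drv.h>

    /* Hypothetical hardware-specific helper, not part of the API above. */
    static int my_hw_xmit(struct xsk_buff_pool *pool, struct xdp_desc *desc)
    {
            /* Post desc->addr / desc->len to the TX ring here. */
            return 0;
    }

    static void my_xsk_tx_poll(struct xsk_buff_pool *pool, unsigned int budget)
    {
            struct xdp_desc desc;
            unsigned int sent = 0;

            while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
                    if (my_hw_xmit(pool, &desc))
                            break;
                    sent++;
            }
            /* Hand the consumed TX descriptors back to the pool/user ring. */
            xsk_tx_release(pool);

            /* Ask user space to kick us again if it relies on need_wakeup. */
            if (xsk_uses_need_wakeup(pool))
                    xsk_set_tx_need_wakeup(pool);

            /* Later, from the TX completion interrupt: xsk_tx_completed(pool, n); */
    }
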
/Linux-v5.15/net/ceph/
  msgpool.c
      14          struct ceph_msgpool *pool = arg;
      17          msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
      20                  dout("msgpool_alloc %s failed\n", pool->name);
      22                  dout("msgpool_alloc %s %p\n", pool->name, msg);
      23                  msg->pool = pool;
      30          struct ceph_msgpool *pool = arg;
      33          dout("msgpool_release %s %p\n", pool->name, msg);
      34          msg->pool = NULL;
      38  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
      43          pool->type = type;
      [all …]

/Linux-v5.15/lib/
  genalloc.c
      16   * available. If new memory is added to the pool a lock has to be
     146   * gen_pool_create - create a new special memory pool
     148   * @nid: node id of the node the pool structure should be allocated on, or -1
     150   * Create a new special memory pool that can be used to manage special purpose
     155          struct gen_pool *pool;
     157          pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
     158          if (pool != NULL) {
     159                  spin_lock_init(&pool->lock);
     160                  INIT_LIST_HEAD(&pool->chunks);
     161                  pool->min_alloc_order = min_alloc_order;
      [all …]

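  gen_pool_create() is paired with gen_pool_add(), gen_pool_alloc() and gen_pool_free()
  for carving up driver-owned memory such as on-chip SRAM. A brief sketch, assuming a
  caller-provided region and an 8-byte allocation granularity (both illustrative):

    #include <linux/genalloc.h>

    static int my_sram_pool_demo(unsigned long sram_virt, size_t sram_size)
    {
            struct gen_pool *pool;
            unsigned long chunk;
            int err;

            /* min_alloc_order = 3: allocations are multiples of 8 bytes. */
            pool = gen_pool_create(3, -1);
            if (!pool)
                    return -ENOMEM;

            err = gen_pool_add(pool, sram_virt, sram_size, -1);
            if (err)
                    goto out;

            chunk = gen_pool_alloc(pool, 256);
            if (!chunk) {
                    err = -ENOMEM;
                    goto out;
            }

            gen_pool_free(pool, chunk, 256);
    out:
            gen_pool_destroy(pool);
            return err;
    }
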
/Linux-v5.15/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
  rmgr_vbuf.c
      31   * @brief VBUF resource pool - refpool
      36   * @brief VBUF resource pool - writepool
      43   * @brief VBUF resource pool - hmmbufferpool
     124   * @brief Initialize the resource pool (host, vbuf)
     126   * @param pool The pointer to the pool
     128  int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
     134          assert(pool);
     135          if (!pool)
     137          /* initialize the recycle pool if used */
     138          if (pool->recycle && pool->size) {
      [all …]

/Linux-v5.15/drivers/staging/octeon/
  ethernet-mem.c
      17   * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
      18   * @pool:     Pool to allocate an skbuff for
      19   * @size:     Size of the buffer needed for the pool
      24  static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
      35                  cvmx_fpa_free(skb->data, pool, size / 128);
      42   * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
      43   * @pool:     Pool to allocate an skbuff for
      44   * @size:     Size of the buffer needed for the pool
      47  static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
      52                  memory = cvmx_fpa_alloc(pool);
      [all …]

/Linux-v5.15/tools/testing/selftests/drivers/net/mlxsw/
  sharedbuffer_configuration.py
      16  objects, pool, tcbind and portpool. Provide an interface to get random
      18  1. Pool:
      22     - random pool number
      30          for pool in pools:
      31              self._pools.append(pool)
      47      def _get_th(self, pool):
      50          if pool["thtype"] == "dynamic":
      58          for pool in self._pools:
      59              if pool["type"] == "ingress":
      60                  ing_pools.append(pool)
      [all …]

/Linux-v5.15/include/linux/
  genalloc.h
      16   * available. If new memory is added to the pool a lock has to be
      46   * @pool: the pool being allocated from
      52                  void *data, struct gen_pool *pool,
      56   * General purpose special memory pool descriptor.
      60          struct list_head chunks;        /* list of chunks in this pool */
      70   * General purpose special memory pool chunk descriptor.
      73          struct list_head next_chunk;    /* next chunk in pool */
      97  extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
     101  static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
     104          return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
      [all …]

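  gen_pool_add_virt() records a chunk's physical address alongside its kernel virtual
  address, so gen_pool_virt_to_phys() can later translate allocations, e.g. for handing
  them to hardware. A small hedged sketch; the chunk addresses and the 128-byte
  allocation are placeholders supplied by the caller:

    #include <linux/genalloc.h>

    static phys_addr_t demo_pool_phys(struct gen_pool *pool, void *virt,
                                      phys_addr_t phys, size_t size)
    {
            unsigned long addr;

            /* Register the chunk with both its virtual and physical address. */
            if (gen_pool_add_virt(pool, (unsigned long)virt, phys, size, -1))
                    return 0;

            addr = gen_pool_alloc(pool, 128);
            if (!addr)
                    return 0;

            /* The allocation stays in use; translate it for a DMA-style consumer. */
            return gen_pool_virt_to_phys(pool, addr);
    }
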
/Linux-v5.15/arch/mips/include/asm/octeon/
  cvmx-fpa.h
      31   * Interface to the hardware Free Pool Allocator.
      79   * Structure describing the current state of a FPA pool.
      88          /* The number of elements in the pool at creation */
     101   * Return the name of the pool
     103   * @pool:   Pool to get the name of
     106  static inline const char *cvmx_fpa_get_name(uint64_t pool)
     108          return cvmx_fpa_pool_info[pool].name;
     112   * Return the base of the pool
     114   * @pool:   Pool to get the base of
     117  static inline void *cvmx_fpa_get_base(uint64_t pool)
      [all …]

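  This is the hardware free-pool allocator the Octeon Ethernet code above draws its
  buffers from. A hedged sketch using only the accessors shown here and in ethernet-mem.c;
  the pool number, the buffer size, and the size / 128 cache-line convention are
  assumptions carried over from that file:

    #include <asm/octeon/cvmx-fpa.h>
    #include <linux/printk.h>

    /* Illustrative only: pool number and buffer geometry are made up. */
    #define DEMO_POOL       0
    #define DEMO_BUF_SIZE   2048

    static void demo_fpa_roundtrip(void)
    {
            void *buf;

            pr_info("FPA pool %d is '%s', base %p\n", DEMO_POOL,
                    cvmx_fpa_get_name(DEMO_POOL), cvmx_fpa_get_base(DEMO_POOL));

            /* Pop one hardware-managed buffer from the free pool ... */
            buf = cvmx_fpa_alloc(DEMO_POOL);
            if (!buf)
                    return;

            /* ... and push it back; the last argument is the size in cache lines. */
            cvmx_fpa_free(buf, DEMO_POOL, DEMO_BUF_SIZE / 128);
    }
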
/Linux-v5.15/drivers/net/ethernet/mellanox/mlxsw/
  spectrum_cnt.c
      24          spinlock_t counter_pool_lock; /* Protects counter pool allocations */
      54          struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
      62          for (i = 0; i < pool->sub_pools_count; i++) {
      63                  sub_pool = &pool->sub_pools[i];
      89                  sub_pool = &pool->sub_pools[i];
      99          struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
     104          for (i = 0; i < pool->sub_pools_count; i++) {
     105                  sub_pool = &pool->sub_pools[i];
     115          const struct mlxsw_sp_counter_pool *pool = priv;
     117          return atomic_read(&pool->active_entries_count);
      [all …]

/Linux-v5.15/drivers/gpu/drm/amd/display/dc/dce80/
  dce80_resource.c
     801  static void dce80_resource_destruct(struct dce110_resource_pool *pool)
     805          for (i = 0; i < pool->base.pipe_count; i++) {
     806                  if (pool->base.opps[i] != NULL)
     807                          dce110_opp_destroy(&pool->base.opps[i]);
     809                  if (pool->base.transforms[i] != NULL)
     810                          dce80_transform_destroy(&pool->base.transforms[i]);
     812                  if (pool->base.ipps[i] != NULL)
     813                          dce_ipp_destroy(&pool->base.ipps[i]);
     815                  if (pool->base.mis[i] != NULL) {
     816                          kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
      [all …]

/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/steering/
  dr_icm_pool.c
      14          struct mutex mutex; /* protect the ICM pool and ICM buddy */
      62  dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
      64          struct mlx5_core_dev *mdev = pool->dmn->mdev;
      74          icm_mr->dmn = pool->dmn;
      76          icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
      77                                                                  pool->icm_type);
      79          if (pool->icm_type == DR_ICM_TYPE_STE) {
      93                  mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
      98          err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
     104                  mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
      [all …]

/Linux-v5.15/drivers/gpu/drm/amd/display/dc/dce60/
  dce60_resource.c
     796  static void dce60_resource_destruct(struct dce110_resource_pool *pool)
     800          for (i = 0; i < pool->base.pipe_count; i++) {
     801                  if (pool->base.opps[i] != NULL)
     802                          dce110_opp_destroy(&pool->base.opps[i]);
     804                  if (pool->base.transforms[i] != NULL)
     805                          dce60_transform_destroy(&pool->base.transforms[i]);
     807                  if (pool->base.ipps[i] != NULL)
     808                          dce_ipp_destroy(&pool->base.ipps[i]);
     810                  if (pool->base.mis[i] != NULL) {
     811                          kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
      [all …]

/Linux-v5.15/drivers/gpu/drm/i915/gt/
  intel_gt_buffer_pool.c
      12  static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
      14          return container_of(pool, struct intel_gt, buffer_pool);
      18  bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
      28          if (n >= ARRAY_SIZE(pool->cache_list))
      29                  n = ARRAY_SIZE(pool->cache_list) - 1;
      31          return &pool->cache_list[n];
      41  static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
      48          for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
      49                  struct list_head *list = &pool->cache_list[n];
      54                  if (spin_trylock_irq(&pool->lock)) {
      [all …]

/Linux-v5.15/net/rds/
  ib_rdma.c
     198  struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
     204          spin_lock_irqsave(&pool->clean_lock, flags);
     205          ret = llist_del_first(&pool->clean_list);
     206          spin_unlock_irqrestore(&pool->clean_lock, flags);
     209          if (pool->pool_type == RDS_IB_MR_8K_POOL)
     275          struct rds_ib_mr_pool *pool = ibmr->pool;
     277          atomic_sub(pinned, &pool->free_pinned);
     281  static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
     285          item_count = atomic_read(&pool->item_count);
     337   * Flush our pool of MRs.
      [all …]