Lines matching full:pool (identifier search; the hits below come from Zephyr's net_buf buffer-pool implementation)

45 /* Linker-defined symbol bound to the static pool structs */
50 struct net_buf_pool *pool; in net_buf_pool_get() local
52 STRUCT_SECTION_GET(net_buf_pool, id, &pool); in net_buf_pool_get()
54 return pool; in net_buf_pool_get()
57 static int pool_id(struct net_buf_pool *pool) in pool_id() argument
59 return pool - TYPE_SECTION_START(net_buf_pool); in pool_id()
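net_buf_pool_get() resolves a pool ID to the pool struct stored in the linker-generated net_buf_pool section (STRUCT_SECTION_GET), and pool_id() is the inverse: pointer subtraction against the start of that section (TYPE_SECTION_START). A minimal standalone sketch of the same round trip, using a plain array in place of the iterable section (all names here are illustrative, not Zephyr APIs):

#include <assert.h>

struct fake_pool { int buf_count; };

/* Stand-in for the linker-placed net_buf_pool section: pools are contiguous. */
static struct fake_pool pools[3];

static struct fake_pool *fake_pool_get(int id)  { return &pools[id]; }
static int fake_pool_id(struct fake_pool *pool) { return (int)(pool - pools); }

int main(void)
{
        for (int id = 0; id < 3; id++) {
                /* ID -> pointer -> ID is a pure pointer-arithmetic round trip. */
                assert(fake_pool_id(fake_pool_get(id)) == id);
        }
        return 0;
}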
64 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in net_buf_id() local
65 size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size, in net_buf_id()
67 ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs; in net_buf_id()
72 static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool, in pool_get_uninit() argument
75 size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size, in pool_get_uninit()
77 size_t byte_offset = (pool->buf_count - uninit_count) * struct_size; in pool_get_uninit()
80 buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset); in pool_get_uninit()
82 buf->pool_id = pool_id(pool); in pool_get_uninit()
83 buf->user_data_size = pool->user_data_size; in pool_get_uninit()
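net_buf_id() and pool_get_uninit() rely on the same layout: each buffer in a pool occupies ROUND_UP(sizeof(struct net_buf) + user_data_size, alignment) bytes of the contiguous __bufs area, so an index and a byte offset convert into each other by a multiply or a divide. A standalone sketch of that arithmetic, assuming a 4-byte alignment and a made-up buffer struct (the real struct net_buf and Zephyr's rounding macro differ):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_TO       4U                               /* assumed alignment */
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

struct fake_buf { uint8_t pool_id; uint8_t flags; uint16_t len; };

int main(void)
{
        const size_t user_data_size = 8;
        const size_t struct_size = ROUND_UP(sizeof(struct fake_buf) + user_data_size, ALIGN_TO);
        uint8_t bufs[8 * 16];                           /* stand-in for pool->__bufs */

        /* index -> address: how pool_get_uninit() finds the next unused buffer */
        uint8_t *third = bufs + 2 * struct_size;

        /* address -> index: how net_buf_id() recovers the buffer's position */
        ptrdiff_t offset = third - bufs;
        assert((size_t)offset / struct_size == 2);
        return 0;
}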
110 struct k_heap *pool = buf_pool->alloc->alloc_data; in mem_pool_data_alloc() local
114 void *b = k_heap_alloc(pool, sizeof(void *) + *size, timeout); in mem_pool_data_alloc()
130 struct k_heap *pool = buf_pool->alloc->alloc_data; in mem_pool_data_unref() local
139 k_heap_free(pool, ref_count); in mem_pool_data_unref()
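mem_pool_data_alloc() asks the pool's k_heap for sizeof(void *) extra bytes in front of the requested size, and mem_pool_data_unref() later frees the block through that prefix, where the data's reference count lives. A standalone sketch of the prefix-refcount pattern using malloc/free; the real code uses k_heap_alloc()/k_heap_free() and Zephyr's own ref-count handling, so treat this purely as an illustration:

#include <stdint.h>
#include <stdlib.h>

/* Allocate len data bytes with a reference count stored in front of them. */
static uint8_t *data_alloc(size_t len)
{
        uint8_t *ref_count = malloc(sizeof(void *) + len);

        if (ref_count == NULL) {
                return NULL;
        }
        *ref_count = 1;                     /* data starts with a single reference */
        return ref_count + sizeof(void *);  /* caller only ever sees the data part */
}

/* Drop one reference; free the whole block when the last one goes away. */
static void data_unref(uint8_t *data)
{
        uint8_t *ref_count = data - sizeof(void *);

        if (--(*ref_count) == 0) {
                free(ref_count);
        }
}

int main(void)
{
        uint8_t *data = data_alloc(16);

        if (data != NULL) {
                data[0] = 0xaa;
                data_unref(data);
        }
        return 0;
}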
151 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in fixed_data_alloc() local
152 const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data; in fixed_data_alloc()
154 *size = pool->alloc->max_alloc_size; in fixed_data_alloc()
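fixed_data_alloc() performs no allocation at all: the request is clamped to pool->alloc->max_alloc_size and the buffer receives a slice of a statically defined data area. A standalone sketch of the slice lookup; the assumption that buffer N maps to the Nth fixed-size slice is mine, since the listing only shows the size clamp:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BUF_COUNT      4
#define MAX_ALLOC_SIZE 64U

/* Stand-in for the static data area that backs a fixed pool. */
static uint8_t data_pool[BUF_COUNT * MAX_ALLOC_SIZE];

/* Assumed behaviour: buffer N always gets the Nth fixed-size slice, never more, never less. */
static uint8_t *fixed_slice(int buf_index, size_t *size)
{
        *size = MAX_ALLOC_SIZE;
        return &data_pool[buf_index * MAX_ALLOC_SIZE];
}

int main(void)
{
        size_t size;
        uint8_t *slice = fixed_slice(2, &size);

        assert(size == MAX_ALLOC_SIZE);
        assert(slice == data_pool + 2 * MAX_ALLOC_SIZE);
        return 0;
}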
213 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in data_alloc() local
215 return pool->alloc->cb->alloc(buf, size, timeout); in data_alloc()
220 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in data_ref() local
222 return pool->alloc->cb->ref(buf, data); in data_ref()
226 struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size, in net_buf_alloc_len_debug() argument
230 struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, in net_buf_alloc_len_debug()
238 __ASSERT_NO_MSG(pool); in net_buf_alloc_len_debug()
240 NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size); in net_buf_alloc_len_debug()
243 * when accessing pool->uninit_count. in net_buf_alloc_len_debug()
245 key = k_spin_lock(&pool->lock); in net_buf_alloc_len_debug()
250 if (pool->uninit_count) { in net_buf_alloc_len_debug()
253 /* If this is not the first access to the pool, we can in net_buf_alloc_len_debug()
257 if (pool->uninit_count < pool->buf_count) { in net_buf_alloc_len_debug()
258 buf = k_lifo_get(&pool->free, K_NO_WAIT); in net_buf_alloc_len_debug()
260 k_spin_unlock(&pool->lock, key); in net_buf_alloc_len_debug()
265 uninit_count = pool->uninit_count--; in net_buf_alloc_len_debug()
266 k_spin_unlock(&pool->lock, key); in net_buf_alloc_len_debug()
268 buf = pool_get_uninit(pool, uninit_count); in net_buf_alloc_len_debug()
272 k_spin_unlock(&pool->lock, key); in net_buf_alloc_len_debug()
283 buf = k_lifo_get(&pool->free, K_NO_WAIT); in net_buf_alloc_len_debug()
286 NET_BUF_WARN("%s():%d: Pool %s low on buffers.", in net_buf_alloc_len_debug()
287 func, line, pool->name); in net_buf_alloc_len_debug()
289 NET_BUF_WARN("%s():%d: Pool %p low on buffers.", in net_buf_alloc_len_debug()
290 func, line, pool); in net_buf_alloc_len_debug()
292 buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL); in net_buf_alloc_len_debug()
294 NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs", in net_buf_alloc_len_debug()
295 func, line, pool->name, in net_buf_alloc_len_debug()
298 NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs", in net_buf_alloc_len_debug()
299 func, line, pool, in net_buf_alloc_len_debug()
304 buf = k_lifo_get(&pool->free, timeout); in net_buf_alloc_len_debug()
307 buf = k_lifo_get(&pool->free, timeout); in net_buf_alloc_len_debug()
345 atomic_dec(&pool->avail_count); in net_buf_alloc_len_debug()
346 __ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0); in net_buf_alloc_len_debug()
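The allocation path above hands out buffers lazily: while uninit_count is non-zero, net_buf_alloc_len() carves the next never-used buffer out of __bufs under the pool spinlock; once every buffer has been initialized it falls back to the pool's free LIFO, optionally warning when a K_FOREVER wait keeps blocking. Typical usage looks roughly like the following sketch against the public Zephyr net_buf API; the pool name and sizes are made up, and the include path is assumed to be <zephyr/net_buf.h> (older trees use <zephyr/net/buf.h>):

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* 8 buffers, 128 data bytes and 0 user-data bytes each, no destroy callback. */
NET_BUF_POOL_DEFINE(my_pool, 8, 128, 0, NULL);

void produce(void)
{
        /* Wait up to 100 ms for a buffer with room for 64 payload bytes. */
        struct net_buf *buf = net_buf_alloc_len(&my_pool, 64, K_MSEC(100));

        if (buf == NULL) {
                return;                     /* pool exhausted within the timeout */
        }

        net_buf_add_u8(buf, 0x01);          /* append payload */
        /* ... hand the buffer off, or drop the reference: */
        net_buf_unref(buf);
}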
352 struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool, in net_buf_alloc_fixed_debug() argument
356 return net_buf_alloc_len_debug(pool, pool->alloc->max_alloc_size, timeout, func, in net_buf_alloc_fixed_debug()
360 struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, in net_buf_alloc_fixed() argument
363 return net_buf_alloc_len(pool, pool->alloc->max_alloc_size, timeout); in net_buf_alloc_fixed()
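net_buf_alloc_fixed() is simply net_buf_alloc_len() with the pool allocator's max_alloc_size filled in, which is the natural call for pools whose data area is statically sized. A brief hedged sketch (pool name and sizes are illustrative):

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* Fixed pool: every buffer owns a 256-byte slice of a static data area. */
NET_BUF_POOL_FIXED_DEFINE(fixed_pool, 4, 256, 0, NULL);

void get_full_size_buf(void)
{
        /* net_buf_alloc() on a fixed pool requests max_alloc_size internally. */
        struct net_buf *buf = net_buf_alloc(&fixed_pool, K_NO_WAIT);

        if (buf != NULL) {
                net_buf_unref(buf);
        }
}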
368 struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool, in net_buf_alloc_with_data_debug() argument
373 struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool, in net_buf_alloc_with_data_debug()
381 buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line); in net_buf_alloc_with_data_debug()
383 buf = net_buf_alloc_len(pool, 0, timeout); in net_buf_alloc_with_data_debug()
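net_buf_alloc_with_data() allocates a buffer with zero bytes of pool data (note the size 0 passed to net_buf_alloc_len() above) and then points it at caller-provided, externally owned memory. A hedged usage sketch; names and sizes are illustrative:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* Pool with no data of its own: its buffers only wrap external memory. */
NET_BUF_POOL_DEFINE(hdr_pool, 4, 0, 0, NULL);

static uint8_t external_frame[64];

void wrap_external(void)
{
        struct net_buf *buf = net_buf_alloc_with_data(&hdr_pool, external_frame,
                                                      sizeof(external_frame), K_NO_WAIT);

        if (buf != NULL) {
                /* buf->data points at external_frame; the pool never frees it. */
                net_buf_unref(buf);
        }
}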
464 struct net_buf_pool *pool; in net_buf_unref_debug() local
483 pool = net_buf_pool_get(buf->pool_id); in net_buf_unref_debug()
486 atomic_inc(&pool->avail_count); in net_buf_unref_debug()
487 __ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count); in net_buf_unref_debug()
490 if (pool->destroy) { in net_buf_unref_debug()
491 pool->destroy(buf); in net_buf_unref_debug()
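When the last reference is dropped, net_buf_unref() restores the pool's avail_count and then runs the pool's destroy callback if one was registered (otherwise the buffer goes straight back to the free LIFO). A custom destroy callback must end by handing the buffer back with net_buf_destroy(); a hedged sketch:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

static void my_pool_destroy(struct net_buf *buf)
{
        /* Pool-specific cleanup or accounting would go here. */

        /* Mandatory last step: put the buffer back on the pool's free LIFO. */
        net_buf_destroy(buf);
}

NET_BUF_POOL_DEFINE(cb_pool, 4, 64, 0, my_pool_destroy);

void exercise(void)
{
        struct net_buf *buf = net_buf_alloc(&cb_pool, K_NO_WAIT);

        if (buf != NULL) {
                net_buf_unref(buf);   /* the final unref ends up in my_pool_destroy() */
        }
}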
513 struct net_buf_pool *pool; in net_buf_clone() local
518 pool = net_buf_pool_get(buf->pool_id); in net_buf_clone()
520 clone = net_buf_alloc_len(pool, 0, timeout); in net_buf_clone()
525 /* If the pool supports data referencing use that. Otherwise in net_buf_clone()
528 if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) { in net_buf_clone()
549 /* user_data_size should be the same for buffers from the same pool */ in net_buf_clone()
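net_buf_clone() allocates a zero-length buffer from the same pool as the original and then either takes an extra reference on the original's data (when the pool's allocator provides a ref callback and the data is not external) or falls back to copying it. A hedged usage sketch; the heap-backed pool is an assumption chosen because its allocator supports reference-counted data, and it needs the system heap enabled in the build:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* Heap-backed pool: clones can share the original payload instead of copying it.
 * Assumes CONFIG_HEAP_MEM_POOL_SIZE is set so the system heap exists. */
NET_BUF_POOL_HEAP_DEFINE(heap_pool, 4, 0, NULL);

void duplicate(struct net_buf *orig)
{
        struct net_buf *copy = net_buf_clone(orig, K_MSEC(50));

        if (copy == NULL) {
                return;
        }

        /* Both buffers now see the same payload bytes. */
        net_buf_unref(copy);
}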
706 struct net_buf_pool *pool; in net_buf_append_bytes() local
708 /* Allocate from the original pool if no callback has in net_buf_append_bytes()
711 pool = net_buf_pool_get(buf->pool_id); in net_buf_append_bytes()
712 max_size = pool->alloc->max_alloc_size; in net_buf_append_bytes()
713 frag = net_buf_alloc_len(pool, in net_buf_append_bytes()
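net_buf_append_bytes() keeps appending into new fragments once the current buffer runs out of tailroom; with no allocator callback supplied it allocates those fragments from the original buffer's own pool, capped at the pool allocator's max_alloc_size per fragment. A hedged usage sketch with made-up sizes:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

NET_BUF_POOL_DEFINE(frag_pool, 8, 32, 0, NULL);

void append_large(void)
{
        static const uint8_t payload[100] = { 0 };
        struct net_buf *buf = net_buf_alloc(&frag_pool, K_NO_WAIT);

        if (buf == NULL) {
                return;
        }

        /* 100 bytes into 32-byte buffers: the extra fragments come from frag_pool
         * because no allocator callback (second-to-last argument) is given. */
        size_t written = net_buf_append_bytes(buf, sizeof(payload), payload,
                                              K_NO_WAIT, NULL, NULL);

        if (written < sizeof(payload)) {
                /* the pool ran dry before everything fit */
        }

        net_buf_unref(buf);   /* also drops the fragment chain */
}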