Lines Matching full:pool (whole-word search hits; each entry shows the source line number, the matching line, and the enclosing function)
45 /* Linker-defined symbol bound to the static pool structs */
53 static int pool_id(struct net_buf_pool *pool) in pool_id() argument
55 return pool - _net_buf_pool_list; in pool_id()
60 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in net_buf_id() local
62 return buf - pool->__bufs; in net_buf_id()
65 static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool, in pool_get_uninit() argument
70 buf = &pool->__bufs[pool->buf_count - uninit_count]; in pool_get_uninit()
72 buf->pool_id = pool_id(pool); in pool_get_uninit()
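
The matches above come from the index helpers: the linker collects every statically defined pool into one contiguous _net_buf_pool_list array, and each pool's buffers sit in one contiguous __bufs array, so both the pool ID stored in the buffer and the buffer's own index fall out of plain pointer subtraction. A minimal standalone sketch of that arithmetic, with made-up stand-in types rather than the real net_buf structures:

#include <stdio.h>

struct fake_buf {
	unsigned char pool_id;          /* index into pool_list[] */
};

struct fake_pool {
	struct fake_buf *bufs;          /* stands in for pool->__bufs */
	unsigned short buf_count;
};

static struct fake_buf bufs_a[4], bufs_b[2];

/* Stands in for the linker-collected _net_buf_pool_list section. */
static struct fake_pool pool_list[] = {
	{ .bufs = bufs_a, .buf_count = 4 },
	{ .bufs = bufs_b, .buf_count = 2 },
};

static int pool_id(const struct fake_pool *pool)
{
	/* Element address minus array base gives the element index. */
	return (int)(pool - pool_list);
}

static int buf_id(const struct fake_buf *buf)
{
	const struct fake_pool *pool = &pool_list[buf->pool_id];

	return (int)(buf - pool->bufs);
}

int main(void)
{
	bufs_b[1].pool_id = (unsigned char)pool_id(&pool_list[1]);
	printf("pool %d, buf %d\n", bufs_b[1].pool_id, buf_id(&bufs_b[1]));
	return 0;
}
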
99 struct k_heap *pool = buf_pool->alloc->alloc_data; in mem_pool_data_alloc() local
103 void *b = k_heap_alloc(pool, 1 + *size, timeout); in mem_pool_data_alloc()
119 struct k_heap *pool = buf_pool->alloc->alloc_data; in mem_pool_data_unref() local
128 k_heap_free(pool, ref_count); in mem_pool_data_unref()
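
The heap-backed allocator asks the k_heap for one extra byte in front of the payload and uses it as a reference counter; unref walks back to that byte and frees the whole allocation once the count drops to zero. A standalone sketch of that layout, with malloc()/free() standing in for k_heap_alloc()/k_heap_free():

#include <stdint.h>
#include <stdlib.h>

static uint8_t *data_alloc(size_t size)
{
	/* One ref-count byte followed by the payload. */
	uint8_t *ref_count = malloc(1 + size);

	if (ref_count == NULL) {
		return NULL;
	}

	*ref_count = 1;                 /* a single owner so far */
	return ref_count + 1;           /* the caller only sees the payload */
}

static void data_unref(uint8_t *data)
{
	uint8_t *ref_count = data - 1;

	if (--(*ref_count) == 0) {
		/* Free from the ref-count byte, i.e. the original pointer. */
		free(ref_count);
	}
}

int main(void)
{
	uint8_t *data = data_alloc(64);

	if (data != NULL) {
		data[0] = 0xAA;
		data_unref(data);       /* last reference: releases everything */
	}
	return 0;
}
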
140 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in fixed_data_alloc() local
141 const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data; in fixed_data_alloc()
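
The fixed allocator never touches a heap at all: every buffer in the pool owns one equally sized slice of a single static data block, so the data pointer is a pure function of the buffer's index. A tiny illustrative sketch (names and sizes invented here, not the real net_buf_pool_fixed layout):

#include <stdint.h>

#define BUF_COUNT 4
#define DATA_SIZE 128

static uint8_t data_block[BUF_COUNT * DATA_SIZE];

static uint8_t *fixed_data_for(int buf_index)
{
	/* Slice i starts at i * DATA_SIZE; allocation can never fail. */
	return &data_block[buf_index * DATA_SIZE];
}
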
201 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in data_alloc() local
203 return pool->alloc->cb->alloc(buf, size, timeout); in data_alloc()
208 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in data_ref() local
210 return pool->alloc->cb->ref(buf, data); in data_ref()
215 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); in data_unref() local
221 pool->alloc->cb->unref(buf, data); in data_unref()
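
data_alloc(), data_ref() and data_unref() are thin dispatchers: the buffer only carries a pool ID, the pool points at an allocator descriptor, and that descriptor's callback table decides how data is obtained, shared and released. A compile-only sketch of the same vtable pattern, with all names invented for illustration:

#include <stdint.h>
#include <stddef.h>

struct my_buf {
	uint8_t pool_id;
};

struct my_alloc_cb {
	uint8_t *(*alloc)(struct my_buf *buf, size_t *size, int timeout_ms);
	uint8_t *(*ref)(struct my_buf *buf, uint8_t *data);
	void     (*unref)(struct my_buf *buf, uint8_t *data);
};

struct my_alloc {
	const struct my_alloc_cb *cb;   /* per-strategy callback table */
	void *alloc_data;               /* e.g. a heap or a fixed data block */
};

struct my_pool {
	const struct my_alloc *alloc;
};

/* Stands in for net_buf_pool_get(): pool_id indexes the global pool array. */
static struct my_pool pool_list[2];

static uint8_t *data_alloc(struct my_buf *buf, size_t *size, int timeout_ms)
{
	struct my_pool *pool = &pool_list[buf->pool_id];

	/* Dispatch to whichever allocator this pool was defined with. */
	return pool->alloc->cb->alloc(buf, size, timeout_ms);
}
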
225 struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size, in net_buf_alloc_len_debug() argument
229 struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size, in net_buf_alloc_len_debug()
237 __ASSERT_NO_MSG(pool); in net_buf_alloc_len_debug()
239 NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size); in net_buf_alloc_len_debug()
242 * when accessing pool->uninit_count. in net_buf_alloc_len_debug()
249 if (pool->uninit_count) { in net_buf_alloc_len_debug()
252 /* If this is not the first access to the pool, we can in net_buf_alloc_len_debug()
256 if (pool->uninit_count < pool->buf_count) { in net_buf_alloc_len_debug()
257 buf = k_lifo_get(&pool->free, K_NO_WAIT); in net_buf_alloc_len_debug()
264 uninit_count = pool->uninit_count--; in net_buf_alloc_len_debug()
267 buf = pool_get_uninit(pool, uninit_count); in net_buf_alloc_len_debug()
276 buf = k_lifo_get(&pool->free, K_NO_WAIT); in net_buf_alloc_len_debug()
279 NET_BUF_WARN("%s():%d: Pool %s low on buffers.", in net_buf_alloc_len_debug()
280 func, line, pool->name); in net_buf_alloc_len_debug()
282 NET_BUF_WARN("%s():%d: Pool %p low on buffers.", in net_buf_alloc_len_debug()
283 func, line, pool); in net_buf_alloc_len_debug()
285 buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL); in net_buf_alloc_len_debug()
287 NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs", in net_buf_alloc_len_debug()
288 func, line, pool->name, in net_buf_alloc_len_debug()
291 NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs", in net_buf_alloc_len_debug()
292 func, line, pool, in net_buf_alloc_len_debug()
297 buf = k_lifo_get(&pool->free, timeout); in net_buf_alloc_len_debug()
300 buf = k_lifo_get(&pool->free, timeout); in net_buf_alloc_len_debug()
347 atomic_dec(&pool->avail_count); in net_buf_alloc_len_debug()
348 __ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0); in net_buf_alloc_len_debug()
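
The allocation path initializes buffers lazily: as long as uninit_count is non-zero, a never-used buffer can be taken straight out of the static array, and only once every buffer has been handed out at least once does allocation rely entirely on the free LIFO. A standalone sketch of just that counting scheme; the real code also prefers recycling an already-freed buffer when one is immediately available, and that short-cut, plus locking, timeouts, statistics and the "low on buffers" warnings, is omitted here. Names are stand-ins:

#include <stddef.h>

struct fake_buf {
	int pool_id;
};

struct fake_pool {
	struct fake_buf *bufs;          /* static backing array (__bufs) */
	unsigned short buf_count;       /* total number of buffers */
	unsigned short uninit_count;    /* how many were never handed out */
};

/* Stub for the k_lifo based free list of previously used buffers. */
static struct fake_buf *free_list_get(struct fake_pool *pool)
{
	(void)pool;
	return NULL;    /* the real code does k_lifo_get(&pool->free, timeout) */
}

static struct fake_buf *pool_alloc(struct fake_pool *pool)
{
	if (pool->uninit_count > 0) {
		unsigned short uninit_count = pool->uninit_count--;

		/* buf_count - uninit_count is 0 for the very first
		 * allocation and walks forward through the array.
		 */
		return &pool->bufs[pool->buf_count - uninit_count];
	}

	/* Every buffer has been initialized at least once: recycle. */
	return free_list_get(pool);
}
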
354 struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool, in net_buf_alloc_fixed_debug() argument
358 const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data; in net_buf_alloc_fixed_debug()
360 return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func, in net_buf_alloc_fixed_debug()
364 struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, in net_buf_alloc_fixed() argument
367 const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data; in net_buf_alloc_fixed()
369 return net_buf_alloc_len(pool, fixed->data_size, timeout); in net_buf_alloc_fixed()
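
net_buf_alloc_fixed() is the convenience entry point for pools whose allocator always hands out the same data size. A hedged usage sketch, assuming a recent Zephyr tree where NET_BUF_POOL_FIXED_DEFINE() takes (name, buffer count, data size, user-data size, destroy callback) and where the public header is <zephyr/net_buf.h>; older trees use <zephyr/net/buf.h> and a shorter macro:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* 8 buffers, 128 data bytes each, no user data, default destroy callback. */
NET_BUF_POOL_FIXED_DEFINE(my_pool, 8, 128, 0, NULL);

void produce(void)
{
	/* The fixed allocator decides the size, so no length argument here. */
	struct net_buf *buf = net_buf_alloc_fixed(&my_pool, K_MSEC(100));

	if (buf == NULL) {
		return;         /* pool exhausted and the timeout expired */
	}

	net_buf_add_u8(buf, 0x42);

	/* ... hand the buffer off, or drop the reference again ... */
	net_buf_unref(buf);
}
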
374 struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool, in net_buf_alloc_with_data_debug() argument
379 struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool, in net_buf_alloc_with_data_debug()
387 buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line); in net_buf_alloc_with_data_debug()
389 buf = net_buf_alloc_len(pool, 0, timeout); in net_buf_alloc_with_data_debug()
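
net_buf_alloc_with_data() pulls a buffer from the pool with an internal data allocation of zero (the size-0 calls above) and then points it at caller-supplied memory, marking it as external data. A hedged usage sketch under the same header and macro assumptions as the previous example; the DMA-region names here are invented:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* Pool that only provides net_buf metadata; the data comes from elsewhere. */
NET_BUF_POOL_DEFINE(hdr_pool, 4, 0, 0, NULL);

static uint8_t dma_region[256];

void wrap_external(size_t rx_len)
{
	/* The buffer references dma_region directly instead of pool memory,
	 * with its data length and size preset to rx_len.
	 */
	struct net_buf *buf = net_buf_alloc_with_data(&hdr_pool, dma_region,
						      rx_len, K_NO_WAIT);

	if (buf != NULL) {
		/* ... consume buf->data / buf->len ... */
		net_buf_unref(buf);
	}
}
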
484 struct net_buf_pool *pool; in net_buf_unref_debug() local
508 pool = net_buf_pool_get(buf->pool_id); in net_buf_unref_debug()
511 atomic_inc(&pool->avail_count); in net_buf_unref_debug()
512 __ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count); in net_buf_unref_debug()
515 if (pool->destroy) { in net_buf_unref_debug()
516 pool->destroy(buf); in net_buf_unref_debug()
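
net_buf_unref() only returns the buffer to its pool on the final reference drop: the availability counter is bumped back up and the pool's destroy callback runs (or the default path, which pushes the buffer back onto the free LIFO). A short sketch of the reference-counting contract this implies, under the same header assumption as above:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

void pass_to_two_consumers(struct net_buf *buf,
			   void (*consumer)(struct net_buf *buf))
{
	/* Take an extra reference for the second consumer (count is now 2);
	 * each consumer is expected to call net_buf_unref() when done, and
	 * only the last unref hands the buffer back to its pool.
	 */
	consumer(net_buf_ref(buf));
	consumer(buf);
}
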
538 struct net_buf_pool *pool; in net_buf_clone() local
543 pool = net_buf_pool_get(buf->pool_id); in net_buf_clone()
545 clone = net_buf_alloc_len(pool, 0, timeout); in net_buf_clone()
550 /* If the pool supports data referencing use that. Otherwise in net_buf_clone()
553 if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) { in net_buf_clone()
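
net_buf_clone() allocates a second buffer from the same pool as the original and then either shares the original's data by taking another data reference (allocators that implement ref, non-external data) or allocates fresh data and copies it. A hedged usage sketch under the same header assumption:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

struct net_buf *duplicate_for_logging(struct net_buf *buf)
{
	struct net_buf *clone = net_buf_clone(buf, K_NO_WAIT);

	if (clone == NULL) {
		return NULL;    /* no free buffer (or data) right now */
	}

	/* The clone is an independent net_buf; both it and the original
	 * must eventually be released with net_buf_unref().
	 */
	return clone;
}
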
716 struct net_buf_pool *pool; in net_buf_append_bytes() local
718 /* Allocate from the original pool if no callback has in net_buf_append_bytes()
721 pool = net_buf_pool_get(buf->pool_id); in net_buf_append_bytes()
722 frag = net_buf_alloc_len(pool, len, timeout); in net_buf_append_bytes()
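
net_buf_append_bytes() spills data that no longer fits into the current buffer over into newly allocated fragments, and when the caller passes no allocator callback those fragments come from the same pool the original buffer was allocated from, which is what the final matches show. A hedged usage sketch; the parameter order (buffer, length, data, timeout, allocator callback, user data) is taken from recent Zephyr trees, header assumption as above:

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

size_t append_payload(struct net_buf *buf, const uint8_t *payload, size_t len)
{
	/* NULL allocator callback: overflow fragments are drawn from buf's
	 * own pool.  The return value is the number of bytes appended, which
	 * can be less than len if allocation fails or times out.
	 */
	return net_buf_append_bytes(buf, len, payload, K_NO_WAIT, NULL, NULL);
}
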