Matches for refs:slab in Zephyr's kernel memory slab allocator (kernel/mem_slab.c), grouped by function. Lines elided by the original listing are marked /* ... */.
in k_mem_slab_stats_raw():

    /* Object-core stats callback: copy a raw snapshot of the slab's
     * statistics while holding its spinlock.
     */
    struct k_mem_slab *slab;
    /* ... */
    slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
    key = k_spin_lock(&slab->lock);
    memcpy(stats, &slab->info, sizeof(slab->info));
    k_spin_unlock(&slab->lock, key);
in k_mem_slab_stats_query():

    /* Object-core stats callback: convert the slab's block counts into
     * the byte counts that the generic stats query expects.
     */
    struct k_mem_slab *slab;
    /* ... */
    slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
    key = k_spin_lock(&slab->lock);
    ptr->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
                      slab->info.block_size;
    ptr->allocated_bytes = slab->info.num_used * slab->info.block_size;
    /* ... */
    ptr->max_allocated_bytes = slab->info.max_used * slab->info.block_size;
    /* ... */
    k_spin_unlock(&slab->lock, key);
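As a worked example of the conversion above (figures illustrative, not from the source): with num_blocks = 8, block_size = 64 and num_used = 3, the query reports allocated_bytes = 3 * 64 = 192 and free_bytes = (8 - 3) * 64 = 320.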
in k_mem_slab_stats_reset():

    /* Object-core stats callback: reset the high-water mark to the
     * current usage level.
     */
    struct k_mem_slab *slab;
    /* ... */
    slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
    key = k_spin_lock(&slab->lock);
    /* ... */
    slab->info.max_used = slab->info.num_used;
    /* ... */
    k_spin_unlock(&slab->lock, key);
static int create_free_list(struct k_mem_slab *slab)

    /* Reject a buffer or block size that is not pointer-aligned, since
     * the first word of each free block is used as a link pointer.
     */
    /* ... */
    CHECKIF(((slab->info.block_size | (uintptr_t)slab->buffer) &
             /* ... */
    /* Thread the free list through the buffer, last block first. */
    slab->free_list = NULL;
    p = slab->buffer + slab->info.block_size * (slab->info.num_blocks - 1);
    /* ... */
    while (p >= slab->buffer) {
        *(char **)p = slab->free_list;
        slab->free_list = p;
        p -= slab->info.block_size;
    }
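The loop above threads an intrusive singly linked list through the buffer itself: the first word of every free block holds the pointer to the next free block, and walking back to front leaves the lowest block at the head. A minimal standalone sketch of the same technique; build_free_list is a hypothetical name, not the Zephyr source, and the buffer and block size are assumed pointer-aligned:

#include <stddef.h>

static char *build_free_list(char *buffer, size_t block_size, size_t num_blocks)
{
    char *free_list = NULL;
    /* Start at the last block and walk backwards, exactly like the
     * kernel loop above.
     */
    char *p = buffer + block_size * (num_blocks - 1);

    while (p >= buffer) {
        *(char **)p = free_list; /* first word of the block -> old head */
        free_list = p;           /* this block becomes the new head */
        p -= block_size;
    }
    return free_list;
}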
in init_mem_slab_obj_core_list():

    /* Boot-time pass over all statically defined slabs: build each
     * free list and register the slab with the kernel object core.
     */
    STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
        rc = create_free_list(slab);
        /* ... */
        k_object_init(slab);
        /* ... */
        k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
        /* ... */
        k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
                                  /* ... */
    }
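This boot-time loop services slabs created statically; for reference, such a slab would be declared with Zephyr's K_MEM_SLAB_DEFINE macro, roughly as below (names and sizes illustrative; the macro's documented parameters are name, block size, block count, and alignment):

/* 8 blocks of 64 bytes, pointer-aligned; initialized at boot by the
 * loop above rather than by k_mem_slab_init().
 */
K_MEM_SLAB_DEFINE(static_slab, 64, 8, sizeof(void *));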
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, /* ... */)

    /* Record the slab geometry and start with every block free. */
    slab->info.num_blocks = num_blocks;
    slab->info.block_size = block_size;
    slab->buffer = buffer;
    slab->info.num_used = 0U;
    slab->lock = (struct k_spinlock) {};
    /* ... */
    slab->info.max_used = 0U;
    /* ... */
    rc = create_free_list(slab);
    /* ... */
    k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
    /* ... */
    k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
                              /* ... */
    z_waitq_init(&slab->wait_q);
    k_object_init(slab);
    /* ... */
    SYS_PORT_TRACING_OBJ_INIT(k_mem_slab, slab, rc);
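A usage sketch for this runtime path, assuming the upstream signature k_mem_slab_init(slab, buffer, block_size, num_blocks) (the listing truncates the parameter list); buffer size and names are illustrative:

#include <zephyr/kernel.h>

#define MY_BLOCK_SIZE 64 /* must be a multiple of sizeof(void *) */
#define MY_NUM_BLOCKS 8

static char __aligned(sizeof(void *)) my_buf[MY_BLOCK_SIZE * MY_NUM_BLOCKS];
static struct k_mem_slab my_slab;

void my_slab_setup(void)
{
    /* A misaligned buffer or block size trips create_free_list()'s
     * CHECKIF shown earlier.
     */
    int rc = k_mem_slab_init(&my_slab, my_buf, MY_BLOCK_SIZE, MY_NUM_BLOCKS);

    __ASSERT(rc == 0, "slab init failed");
}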
static bool slab_ptr_is_good(struct k_mem_slab *slab, const void *ptr)

    /* A block pointer is valid only if it lies inside the slab's
     * buffer and falls on a block boundary.
     */
    /* ... */
    ptrdiff_t offset = p - slab->buffer;
    /* ... */
    (offset < (slab->info.block_size * slab->info.num_blocks)) &&
    ((offset % slab->info.block_size) == 0);
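Read together, these fragments form a bounds-and-alignment predicate on block pointers. A hedged reconstruction; the offset >= 0 lower-bound check and the return statement are inferred from context, not shown in the listing:

static bool ptr_is_valid_block(struct k_mem_slab *slab, const void *ptr)
{
    const char *p = ptr;
    ptrdiff_t offset = p - slab->buffer;

    return (offset >= 0) &&                                          /* inferred */
           (offset < (slab->info.block_size * slab->info.num_blocks)) &&
           ((offset % slab->info.block_size) == 0);
}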
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)

    k_spinlock_key_t key = k_spin_lock(&slab->lock);
    /* ... */
    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, alloc, slab, timeout);
    /* ... */
    if (slab->free_list != NULL) {
        /* Fast path: pop the head block off the free list. */
        *mem = slab->free_list;
        slab->free_list = *(char **)(slab->free_list);
        slab->info.num_used++;
        __ASSERT((slab->free_list == NULL &&
                  slab->info.num_used == slab->info.num_blocks) ||
                 slab_ptr_is_good(slab, slab->free_list),
                 /* ... */
        /* ... */
        slab->info.max_used = MAX(slab->info.num_used,
                                  slab->info.max_used);
    /* ... */
    /* Slow path: no free block; pend the current thread on the wait
     * queue until a block is freed or the timeout expires.
     */
    SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mem_slab, alloc, slab, timeout);
    /* ... */
    result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
    /* ... (each branch traces its own exit) */
    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);
    /* ... */
    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);
    /* ... */
    k_spin_unlock(&slab->lock, key);
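A usage sketch reusing my_slab from the initialization example; per the documented Zephyr API, the call returns 0 on success, -ENOMEM when K_NO_WAIT finds no free block, and -EAGAIN when the wait times out:

#include <string.h>

void consumer(void)
{
    void *block;

    /* Wait up to 100 ms for a block to become free. */
    if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(100)) == 0) {
        memset(block, 0, MY_BLOCK_SIZE); /* block is owned by this thread */
        /* ... use the block ... */
        k_mem_slab_free(&my_slab, block);
    }
    /* else: no block became available within the timeout */
}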
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)

    k_spinlock_key_t key = k_spin_lock(&slab->lock);
    /* ... */
    __ASSERT(slab_ptr_is_good(slab, mem), "Invalid memory pointer provided");
    /* ... */
    SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
    if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
        /* The slab was exhausted, so a waiter may be pending: hand the
         * freed block directly to the first waiting thread rather than
         * returning it to the free list.
         */
        struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
        /* ... */
            SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
            /* ... */
            z_reschedule(&slab->lock, key);
        /* ... */
    }
    /* No waiter: push the block back onto the head of the free list. */
    *(char **) mem = slab->free_list;
    slab->free_list = (char *) mem;
    slab->info.num_used--;
    /* ... */
    SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
    /* ... */
    k_spin_unlock(&slab->lock, key);
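Two points worth noting about this path. First, a waiter can only exist while free_list is NULL, because k_mem_slab_alloc() above only pends when the list is empty; the free_list == NULL test is therefore enough to decide whether a direct handoff may be needed. Second, the slab_ptr_is_good() assertion encodes the caller contract: mem must be a pointer previously returned by k_mem_slab_alloc() on this slab, since pushing an arbitrary pointer would, in builds without assertions, likely corrupt the free list.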
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)

    if ((slab == NULL) || (stats == NULL)) {
        /* ... */
    }
    /* ... */
    k_spinlock_key_t key = k_spin_lock(&slab->lock);
    /* ... */
    /* Report occupancy in bytes rather than blocks. */
    stats->allocated_bytes = slab->info.num_used * slab->info.block_size;
    stats->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
                        slab->info.block_size;
    /* ... */
    stats->max_allocated_bytes = slab->info.max_used *
                                 slab->info.block_size;
    /* ... */
    k_spin_unlock(&slab->lock, key);
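A usage sketch (illustrative; assumes struct sys_memory_stats from zephyr/sys/mem_stats.h with the three size_t fields written above):

void report_slab_usage(void)
{
    struct sys_memory_stats stats;

    if (k_mem_slab_runtime_stats_get(&my_slab, &stats) == 0) {
        printk("slab: %zu bytes used, %zu free, %zu peak\n",
               stats.allocated_bytes, stats.free_bytes,
               stats.max_allocated_bytes);
    }
}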
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)

    if (slab == NULL) {
        /* ... */
    }
    /* ... */
    k_spinlock_key_t key = k_spin_lock(&slab->lock);
    /* Clamp the high-water mark down to the current usage; it cannot
     * be reset below the number of blocks still allocated.
     */
    slab->info.max_used = slab->info.num_used;
    /* ... */
    k_spin_unlock(&slab->lock, key);
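Illustrative use, continuing the earlier sketches: resetting the watermark at the start of a measurement window so that max_allocated_bytes afterwards reflects only that window:

void start_measurement_window(void)
{
    /* max_used is clamped to the current num_used, not to zero, so the
     * watermark stays consistent with blocks still allocated.
     */
    k_mem_slab_runtime_stats_reset_max(&my_slab);
}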