1 /*
2 * Copyright (c) 2023, Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <zephyr/sys/mem_blocks.h>
9
/* Memory blocks fixture: block size 32, 4 blocks (last arg presumably
 * the buffer alignment — matches SYS_MEM_BLOCKS_DEFINE's align parameter)
 */
SYS_MEM_BLOCKS_DEFINE(mem_block, 32, 4, 16); /* Four 32 byte blocks */

/* Memory slab fixture with the same geometry as [mem_block] above */
K_MEM_SLAB_DEFINE(mem_slab, 32, 4, 16); /* Four 32 byte blocks */

/*
 * The thread stats tests are limited to the same architectures as the
 * thread runtime stats tests they wrap; the helper thread and its
 * hand-off semaphores are only needed on those architectures.
 */
#if !defined(CONFIG_ARCH_POSIX) && !defined(CONFIG_SPARC) && !defined(CONFIG_MIPS)
static void test_thread_entry(void *, void *, void *);

/* Helper thread that ping-pongs with the main test thread */
K_THREAD_DEFINE(test_thread, 1024 + CONFIG_TEST_EXTRA_STACK_SIZE,
		test_thread_entry, NULL, NULL, NULL,
		K_HIGHEST_THREAD_PRIO, 0, 0);

/* Semaphores used to pass control between main and test_thread */
K_SEM_DEFINE(wake_main_thread, 0, 1);
K_SEM_DEFINE(wake_test_thread, 0, 1);
#endif /* !CONFIG_ARCH_POSIX && !CONFIG_SPARC && !CONFIG_MIPS */

/* On SMP builds, one busy thread per extra CPU keeps those CPUs from idling */
#if CONFIG_MP_MAX_NUM_CPUS > 1
K_THREAD_STACK_ARRAY_DEFINE(busy_thread_stack, CONFIG_MP_MAX_NUM_CPUS - 1,
			    512 + CONFIG_TEST_EXTRA_STACK_SIZE);

struct k_thread busy_thread[CONFIG_MP_MAX_NUM_CPUS - 1];
29
/* Entry point for the busy threads: spin forever so the CPU this
 * thread is scheduled on never enters idle during the tests.
 */
void busy_thread_entry(void *p1, void *p2, void *p3)
{
	for (;;) {
		/* Deliberately empty busy loop */
	}
}
36
37 #endif
38
39 /***************** SYSTEM (CPUs and KERNEL) ******************/
40
41 /*
42 * As the k_obj_core_stats_xxx() APIs are essentially wrappers to the
43 * thread runtime stats APIs, limit this test to the same architectures as
44 * that thread runtime stats test.
45 */
46
47 #if !defined(CONFIG_ARCH_POSIX) && !defined(CONFIG_SPARC) && !defined(CONFIG_MIPS)
/*
 * Validate the "raw" and "query" object core stats for the kernel and
 * for each CPU. Exact values cannot be predicted, so the test checks
 * invariants instead: each CPU's raw counters must be at least as large
 * as the corresponding kernel sample, and the sum of the per-CPU query
 * counters must be at least the kernel-level query counters.
 */
ZTEST(obj_core_stats_system, test_obj_core_stats_system)
{
	int status;
	struct k_cycle_stats kernel_raw[CONFIG_MP_MAX_NUM_CPUS];
	struct k_cycle_stats cpu_raw;
	struct k_thread_runtime_stats kernel_query;
	struct k_thread_runtime_stats cpu_query;
	struct k_thread_runtime_stats sum_query;
	unsigned int i;

#if CONFIG_MP_MAX_NUM_CPUS > 1

	/* Create 1 busy thread for each core except the current */

	int prio;

	prio = k_thread_priority_get(k_current_get());

	/* prio + 10: lower priority so they never preempt this thread */
	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS - 1; i++) {
		k_thread_create(&busy_thread[i], busy_thread_stack[i],
				K_THREAD_STACK_SIZEOF(busy_thread_stack[i]),
				busy_thread_entry, NULL, NULL, NULL,
				prio + 10, 0, K_NO_WAIT);
	}
#endif

	/* Sample the kernel's per-CPU raw cycle stats in one call */
	status = k_obj_core_stats_raw(K_OBJ_CORE(&_kernel), kernel_raw,
				      sizeof(kernel_raw));
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	/*
	 * Not much can be predicted for the raw stats aside from the
	 * contents of the CPU sampling to be at least as large as
	 * kernel sampling. The same goes for the query stats.
	 */

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		/* Per-CPU sample is taken after the kernel sample, so its
		 * monotonically increasing counters must be >= kernel's.
		 */
		status = k_obj_core_stats_raw(K_OBJ_CORE(&_kernel.cpus[i]),
					      &cpu_raw, sizeof(cpu_raw));
		zassert_equal(status, 0, "Expected 0, got %d on CPU %u\n",
			      status, i);

		zassert_true(cpu_raw.total >= kernel_raw[i].total);
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		zassert_true(cpu_raw.current >= kernel_raw[i].current);
		zassert_true(cpu_raw.longest >= kernel_raw[i].longest);
		zassert_true(cpu_raw.num_windows >= kernel_raw[i].num_windows);
#endif
		zassert_true(cpu_raw.track_usage == kernel_raw[i].track_usage);
	}

	status = k_obj_core_stats_query(K_OBJ_CORE(&_kernel), &kernel_query,
					sizeof(kernel_query));
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	/* Accumulate every CPU's query stats into sum_query */
	sum_query = (struct k_thread_runtime_stats){};

	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		status = k_obj_core_stats_query(K_OBJ_CORE(&_kernel.cpus[i]),
						&cpu_query, sizeof(cpu_query));
		zassert_equal(status, 0, "Expected 0, got %d on CPU %u\n",
			      status, i);

#ifdef CONFIG_SCHED_THREAD_USAGE
		sum_query.execution_cycles += cpu_query.execution_cycles;
		sum_query.total_cycles += cpu_query.total_cycles;
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		sum_query.current_cycles += cpu_query.current_cycles;
		sum_query.peak_cycles += cpu_query.peak_cycles;
		sum_query.average_cycles += cpu_query.average_cycles;
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		sum_query.idle_cycles += cpu_query.idle_cycles;
#endif
	}

	/* Per-CPU samples postdate the kernel sample, so sums must be >= */
#ifdef CONFIG_SCHED_THREAD_USAGE
	zassert_true(sum_query.execution_cycles >= kernel_query.execution_cycles);
	zassert_true(sum_query.total_cycles >= kernel_query.total_cycles);
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	zassert_true(sum_query.current_cycles >= kernel_query.current_cycles);
	zassert_true(sum_query.peak_cycles >= kernel_query.peak_cycles);
	zassert_true(sum_query.average_cycles >= kernel_query.average_cycles);
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	zassert_true(sum_query.idle_cycles >= kernel_query.idle_cycles);
#endif
}
138 #endif /* !CONFIG_ARCH_POSIX && !CONFIG_SPARC && !CONFIG_MIPS */
139
ZTEST(obj_core_stats_system,test_obj_core_stats_cpu_reset)140 ZTEST(obj_core_stats_system, test_obj_core_stats_cpu_reset)
141 {
142 int status;
143
144 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
145 status = k_obj_core_stats_reset(K_OBJ_CORE(&_kernel.cpus[i]));
146 zassert_equal(status, -ENOTSUP,
147 "Expected %d, got %d on CPU%d\n",
148 -ENOTSUP, status, i);
149 }
150 }
151
ZTEST(obj_core_stats_system,test_obj_core_stats_cpu_disable)152 ZTEST(obj_core_stats_system, test_obj_core_stats_cpu_disable)
153 {
154 int status;
155
156 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
157 status = k_obj_core_stats_disable(K_OBJ_CORE(&_kernel.cpus[i]));
158 zassert_equal(status, -ENOTSUP,
159 "Expected %d, got %d on CPU%d\n",
160 -ENOTSUP, status, i);
161 }
162 }
163
ZTEST(obj_core_stats_system,test_obj_core_stats_cpu_enable)164 ZTEST(obj_core_stats_system, test_obj_core_stats_cpu_enable)
165 {
166 int status;
167
168 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
169 status = k_obj_core_stats_enable(K_OBJ_CORE(&_kernel.cpus[i]));
170 zassert_equal(status, -ENOTSUP,
171 "Expected %d, got %d on CPU%d\n",
172 -ENOTSUP, status, i);
173 }
174 }
175
ZTEST(obj_core_stats_system,test_obj_core_stats_kernel_reset)176 ZTEST(obj_core_stats_system, test_obj_core_stats_kernel_reset)
177 {
178 int status;
179
180 status = k_obj_core_stats_reset(K_OBJ_CORE(&_kernel));
181 zassert_equal(status, -ENOTSUP, "Expected %d, got %d\n",
182 -ENOTSUP, status);
183 }
184
ZTEST(obj_core_stats_system,test_obj_core_stats_kernel_disable)185 ZTEST(obj_core_stats_system, test_obj_core_stats_kernel_disable)
186 {
187 int status;
188
189 status = k_obj_core_stats_disable(K_OBJ_CORE(&_kernel));
190 zassert_equal(status, -ENOTSUP, "Expected %d, got %d\n",
191 -ENOTSUP, status);
192 }
193
ZTEST(obj_core_stats_system,test_obj_core_stats_kernel_enable)194 ZTEST(obj_core_stats_system, test_obj_core_stats_kernel_enable)
195 {
196 int status;
197
198 status = k_obj_core_stats_enable(K_OBJ_CORE(&_kernel));
199 zassert_equal(status, -ENOTSUP, "Expected %d, got %d\n",
200 -ENOTSUP, status);
201 }
202
203 /***************** THREADS ******************/
204
205 #if !defined(CONFIG_ARCH_POSIX) && !defined(CONFIG_SPARC) && !defined(CONFIG_MIPS)
206 /*
207 * As the k_obj_core_stats_xxx() APIs are essentially wrappers to the
208 * thread runtime stats APIs, limit this test to the same architectures as
209 * that thread runtime stats test.
210 */
test_thread_entry(void * p1,void * p2,void * p3)211 void test_thread_entry(void *p1, void *p2, void *p3)
212 {
213 while (1) {
214 k_busy_wait(10000);
215
216 k_sem_give(&wake_main_thread);
217 k_sem_take(&wake_test_thread, K_FOREVER);
218 }
219 }
220
ZTEST(obj_core_stats_thread,test_obj_core_stats_thread_test)221 ZTEST(obj_core_stats_thread, test_obj_core_stats_thread_test)
222 {
223 struct k_cycle_stats raw1;
224 struct k_cycle_stats raw2;
225 struct k_thread_runtime_stats query1;
226 struct k_thread_runtime_stats query2;
227 struct k_thread_runtime_stats query3;
228 int status;
229
230 k_sem_take(&wake_main_thread, K_FOREVER);
231 k_busy_wait(10000);
232
233 /* test_thread should now be blocked on wake_test_thread */
234
235 status = k_obj_core_stats_raw(K_OBJ_CORE(test_thread), &raw1,
236 sizeof(raw1));
237 zassert_equal(status, 0, "Expected 0, got %d", status);
238
239 status = k_obj_core_stats_query(K_OBJ_CORE(test_thread), &query1,
240 sizeof(query1));
241 zassert_equal(status, 0, "Expected 0, got %d", status);
242
243 /*
244 * Busy wait for 10 msec. As test_thread should still be blocked,
245 * its stats data should not change.
246 */
247
248 k_busy_wait(10000);
249
250 status = k_obj_core_stats_raw(K_OBJ_CORE(test_thread), &raw2,
251 sizeof(raw2));
252 zassert_equal(status, 0, "Expected 0, got %d", status);
253
254 status = k_obj_core_stats_query(K_OBJ_CORE(test_thread), &query2,
255 sizeof(query2));
256 zassert_equal(status, 0, "Expected 0, got %d", status);
257
258 zassert_mem_equal(&raw1, &raw2, sizeof(raw1),
259 "Thread raw stats changed while blocked\n");
260 zassert_mem_equal(&query1, &query2, sizeof(query1),
261 "Thread query stats changed while blocked\n");
262
263 /*
264 * Let test_thread execute for a short bit and then re-sample the
265 * stats. As the k_obj_core_stats_query() backend is identical to
266 * that of k_thread_runtime_stats_get(), their queries should be
267 * identical (and different from the previous sample).
268 */
269
270 k_sem_give(&wake_test_thread);
271 k_sem_take(&wake_main_thread, K_FOREVER);
272 k_busy_wait(10000);
273
274 /* test_thread should now be blocked. */
275
276 status = k_obj_core_stats_query(K_OBJ_CORE(test_thread), &query2,
277 sizeof(query3));
278 zassert_equal(status, 0, "Expected 0, got %d\n", status);
279
280 status = k_thread_runtime_stats_get(test_thread, &query3);
281 zassert_equal(status, 0, "Expected 0, got %d\n", status);
282 zassert_mem_equal(&query2, &query3, sizeof(query2),
283 "Queries not equal!\n");
284
285 #ifdef CONFIG_SCHED_THREAD_USAGE
286 zassert_true(query2.execution_cycles > query1.execution_cycles,
287 "Execution cycles did not increase\n");
288 zassert_true(query2.total_cycles > query1.total_cycles,
289 "Total cycles did not increase\n");
290 #endif
291
292 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
293
294 /*
295 * [current_cycles], [peak_cycles] and [average_cycles] can not be
296 * predicted by this test.
297 */
298
299 #endif
300
301 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
302 zassert_equal(query2.idle_cycles, 0,
303 "Expected 0, got %llu\n", query2.idle_cycles);
304 #endif
305
306 /* Reset the stats */
307
308 status = k_obj_core_stats_reset(K_OBJ_CORE(test_thread));
309 zassert_equal(status, 0, "Expected 0, got %d\n", status);
310
311 status = k_obj_core_stats_query(K_OBJ_CORE(test_thread),
312 &query3, sizeof(query3));
313 zassert_equal(status, 0, "Expected 0, got %d\n", status);
314
315 #ifdef CONFIG_SCHED_THREAD_USAGE
316 zassert_equal(query3.execution_cycles, 0,
317 "Expected 0, got %llu\n", query3.execution_cycles);
318 zassert_equal(query3.total_cycles, 0,
319 "Expected 0, got %llu\n", query3.total_cycles);
320 #endif
321
322 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
323 zassert_equal(query3.current_cycles, 0,
324 "Expected 0, got %llu\n", query3.current_cycles);
325 zassert_equal(query3.peak_cycles, 0,
326 "Expected 0, got %llu\n", query3.peak_cycles);
327 zassert_equal(query3.average_cycles, 0,
328 "Expected 0, got %llu\n", query3.average_cycles);
329 #endif
330
331 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
332 zassert_equal(query3.idle_cycles, 0,
333 "Expected 0, got %llu\n", query3.idle_cycles);
334 #endif
335
336 /* Disable the stats (re-using query2 and query3) */
337
338 status = k_obj_core_stats_disable(K_OBJ_CORE(test_thread));
339 zassert_equal(status, 0, "Expected 0, got %llu\n", status);
340
341 k_sem_give(&wake_test_thread);
342 k_sem_take(&wake_main_thread, K_FOREVER);
343 k_busy_wait(10000);
344
345 k_obj_core_stats_query(K_OBJ_CORE(test_thread),
346 &query2, sizeof(query2));
347
348 zassert_mem_equal(&query2, &query3, sizeof(query2),
349 "Stats changed while disabled!\n");
350
351 /* Enable the stats */
352
353 status = k_obj_core_stats_enable(K_OBJ_CORE(test_thread));
354 zassert_equal(status, 0, "Expected 0, got %llu\n", status);
355
356 k_sem_give(&wake_test_thread);
357 k_sem_take(&wake_main_thread, K_FOREVER);
358 k_busy_wait(10000);
359
360 k_obj_core_stats_query(K_OBJ_CORE(test_thread),
361 &query3, sizeof(query3));
362
363 /* We can not predict the stats, but they should be non-zero. */
364
365 #ifdef CONFIG_SCHED_THREAD_USAGE
366 zassert_true(query3.execution_cycles > 0);
367 zassert_true(query3.total_cycles > 0);
368 #endif
369 #ifdef CONFIG_SCHED_THREAD_USAGE
370 zassert_true(query3.current_cycles > 0);
371 zassert_true(query3.peak_cycles > 0);
372 zassert_true(query3.average_cycles > 0);
373 #endif
374 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
375 zassert_true(query3.idle_cycles == 0);
376 #endif
377
378 k_thread_abort(test_thread);
379 }
380 #endif /* !CONFIG_ARCH_POSIX && !CONFIG_SPARC && !CONFIG_MIPS */
381
382 /***************** SYSTEM MEMORY BLOCKS *********************/
383
ZTEST(obj_core_stats_mem_block,test_sys_mem_block_enable)384 ZTEST(obj_core_stats_mem_block, test_sys_mem_block_enable)
385 {
386 int status;
387
388 status = k_obj_core_stats_enable(K_OBJ_CORE(&mem_block));
389 zassert_equal(status, -ENOTSUP,
390 "Not supposed to be supported. Got %d, not %d\n",
391 status, -ENOTSUP);
392 }
393
ZTEST(obj_core_stats_mem_block,test_sys_mem_block_disable)394 ZTEST(obj_core_stats_mem_block, test_sys_mem_block_disable)
395 {
396 int status;
397
398 status = k_obj_core_stats_disable(K_OBJ_CORE(&mem_block));
399 zassert_equal(status, -ENOTSUP,
400 "Not supposed to be supported. Got %d, not %d\n",
401 status, -ENOTSUP);
402 }
403
test_mem_block_raw(const char * str,struct sys_mem_blocks_info * expected)404 static void test_mem_block_raw(const char *str,
405 struct sys_mem_blocks_info *expected)
406 {
407 int status;
408 struct sys_mem_blocks_info raw;
409
410 status = k_obj_core_stats_raw(K_OBJ_CORE(&mem_block), &raw,
411 sizeof(raw));
412 zassert_equal(status, 0,
413 "%s: Failed to get raw stats (%d)\n", str, status);
414
415 zassert_equal(raw.num_blocks, expected->num_blocks,
416 "%s: Expected %u blocks, got %u\n",
417 str, expected->num_blocks, raw.num_blocks);
418 zassert_equal(raw.blk_sz_shift, expected->blk_sz_shift,
419 "%s: Expected blk_sz_shift=%u, got %u\n",
420 str, expected->blk_sz_shift, raw.blk_sz_shift);
421 #ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
422 zassert_equal(raw.used_blocks, expected->used_blocks,
423 "%s: Expected %u used, got %d\n",
424 str, expected->used_blocks, raw.used_blocks);
425 zassert_equal(raw.max_used_blocks, expected->max_used_blocks,
426 "%s: Expected max %u used, got %d\n",
427 str, expected->max_used_blocks, raw.max_used_blocks);
428 #endif
429 }
430
test_mem_block_query(const char * str,struct sys_memory_stats * expected)431 static void test_mem_block_query(const char *str,
432 struct sys_memory_stats *expected)
433 {
434 struct sys_memory_stats query;
435 int status;
436
437 status = k_obj_core_stats_query(K_OBJ_CORE(&mem_block), &query,
438 sizeof(query));
439 zassert_equal(status, 0,
440 "%s: Failed to get query stats (%d)\n", str, status);
441
442 zassert_equal(query.free_bytes, expected->free_bytes,
443 "%s: Expected %u free bytes, got %u\n",
444 str, expected->free_bytes, query.free_bytes);
445 #ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
446 zassert_equal(query.allocated_bytes, expected->allocated_bytes,
447 "%s: Expected %u allocated bytes, got %u\n",
448 str, expected->allocated_bytes, query.allocated_bytes);
449 zassert_equal(query.max_allocated_bytes, expected->max_allocated_bytes,
450 "%s: Expected %u max_allocated bytes, got %d\n",
451 str, expected->max_allocated_bytes,
452 query.max_allocated_bytes);
453 #endif
454 }
455
/*
 * Walk mem_block through alloc/alloc/free/reset while tracking the
 * expected raw and query stats locally, verifying the reported stats
 * after each step via the helpers above.
 */
ZTEST(obj_core_stats_mem_block, test_obj_core_stats_mem_block)
{
	/* Expected raw stats: 4 blocks of 32 bytes (blk_sz_shift = 5) */
	struct sys_mem_blocks_info raw = {
		.num_blocks = 4, .blk_sz_shift = 5,
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
		.used_blocks = 0, .max_used_blocks = 0
#endif
	};
	/* Expected query stats: all 128 bytes (4 x 32) initially free */
	struct sys_memory_stats query = {
		.free_bytes = 128,
		.allocated_bytes = 0,
		.max_allocated_bytes = 0
	};
	void *mem1;
	void *mem2;
	int status;

	/*
	 * As the ordering of the "raw", "query" and "reset" tests matter,
	 * they have been grouped together here. As they are for the most
	 * wrappers for the runtime stats routines, minimal testing is
	 * being done.
	 */

	/* Initial checks */

	test_mem_block_raw("Initial", &raw);
	test_mem_block_query("Initial", &query);

	/* Allocate 1st block */

	status = sys_mem_blocks_alloc(&mem_block, 1, &mem1);
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	/* One 32-byte block moved from free to allocated */
	query.free_bytes -= 32;
	query.allocated_bytes += 32;
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	raw.used_blocks++;
	raw.max_used_blocks++;
	query.max_allocated_bytes += 32;
#endif
	test_mem_block_raw("1st Alloc", &raw);
	test_mem_block_query("1st Alloc", &query);

	/* Allocate 2nd block */

	status = sys_mem_blocks_alloc(&mem_block, 1, &mem2);
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	query.free_bytes -= 32;
	query.allocated_bytes += 32;
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	raw.used_blocks++;
	raw.max_used_blocks++;
	query.max_allocated_bytes += 32;
#endif
	test_mem_block_raw("2nd Alloc", &raw);
	test_mem_block_query("2nd Alloc", &query);

	/* Free 1st block */

	sys_mem_blocks_free(&mem_block, 1, &mem1);

	/* Current usage drops; high-water marks are unchanged by a free */
	query.free_bytes += 32;
	query.allocated_bytes -= 32;
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	raw.used_blocks--;
#endif
	test_mem_block_raw("Free 1st", &raw);
	test_mem_block_query("Free 1st", &query);

	/* Reset the mem block stats */

	status = k_obj_core_stats_reset(K_OBJ_CORE(&mem_block));
	zassert_equal(status, 0, "Expected 0, got %d\n", status);
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	/* Reset collapses the high-water marks to the current usage */
	raw.max_used_blocks = raw.used_blocks;
	query.max_allocated_bytes = query.allocated_bytes;
#endif
	test_mem_block_raw("Reset", &raw);
	test_mem_block_query("Reset", &query);

	/* Cleanup - Free 2nd block */
	sys_mem_blocks_free(&mem_block, 1, &mem2);
}
541
542 /***************** MEMORY SLABS *********************/
543
ZTEST(obj_core_stats_mem_slab,test_mem_slab_enable)544 ZTEST(obj_core_stats_mem_slab, test_mem_slab_enable)
545 {
546 int status;
547
548 status = k_obj_core_stats_disable(K_OBJ_CORE(&mem_slab));
549 zassert_equal(status, -ENOTSUP,
550 "Not supposed to be supported. Got %d, not %d\n",
551 status, -ENOTSUP);
552 }
553
ZTEST(obj_core_stats_mem_slab,test_mem_slab_disable)554 ZTEST(obj_core_stats_mem_slab, test_mem_slab_disable)
555 {
556 int status;
557
558 status = k_obj_core_stats_disable(K_OBJ_CORE(&mem_slab));
559 zassert_equal(status, -ENOTSUP,
560 "Not supposed to be supported. Got %d, not %d\n",
561 status, -ENOTSUP);
562 }
563
test_mem_slab_raw(const char * str,struct k_mem_slab_info * expected)564 static void test_mem_slab_raw(const char *str, struct k_mem_slab_info *expected)
565 {
566 int status;
567 struct k_mem_slab_info raw;
568
569 status = k_obj_core_stats_raw(K_OBJ_CORE(&mem_slab), &raw,
570 sizeof(raw));
571 zassert_equal(status, 0,
572 "%s: Failed to get raw stats (%d)\n", str, status);
573
574 zassert_equal(raw.num_blocks, expected->num_blocks,
575 "%s: Expected %u blocks, got %u\n",
576 str, expected->num_blocks, raw.num_blocks);
577 zassert_equal(raw.block_size, expected->block_size,
578 "%s: Expected block size=%u blocks, got %u\n",
579 str, expected->block_size, raw.block_size);
580 zassert_equal(raw.num_used, expected->num_used,
581 "%s: Expected %u used, got %d\n",
582 str, expected->num_used, raw.num_used);
583 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
584 zassert_equal(raw.max_used, expected->max_used,
585 "%s: Expected max %u used, got %d\n",
586 str, expected->max_used, raw.max_used);
587 #endif
588 }
589
test_mem_slab_query(const char * str,struct sys_memory_stats * expected)590 static void test_mem_slab_query(const char *str,
591 struct sys_memory_stats *expected)
592 {
593 struct sys_memory_stats query;
594 int status;
595
596 status = k_obj_core_stats_query(K_OBJ_CORE(&mem_slab), &query,
597 sizeof(query));
598 zassert_equal(status, 0,
599 "%s: Failed to get query stats (%d)\n", str, status);
600
601 zassert_equal(query.free_bytes, expected->free_bytes,
602 "%s: Expected %u free bytes, got %u\n",
603 str, expected->free_bytes, query.free_bytes);
604 zassert_equal(query.allocated_bytes, expected->allocated_bytes,
605 "%s: Expected %u allocated bytes, got %u\n",
606 str, expected->allocated_bytes, query.allocated_bytes);
607 zassert_equal(query.max_allocated_bytes, expected->max_allocated_bytes,
608 "%s: Expected %u max_allocated bytes, got %d\n",
609 str, expected->max_allocated_bytes,
610 query.max_allocated_bytes);
611 }
612
/*
 * Walk mem_slab through alloc/alloc/free/reset while tracking the
 * expected raw and query stats locally, verifying the reported stats
 * after each step via the helpers above.
 */
ZTEST(obj_core_stats_mem_slab, test_obj_core_stats_mem_slab)
{
	/* Expected raw stats: 4 blocks of 32 bytes, none in use */
	struct k_mem_slab_info raw = {
		.num_blocks = 4, .block_size = 32, .num_used = 0,
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
		.max_used = 0
#endif
	};
	/* Expected query stats: all 128 bytes (4 x 32) initially free */
	struct sys_memory_stats query = {
		.free_bytes = 128,
		.allocated_bytes = 0,
		.max_allocated_bytes = 0
	};
	void *mem1;
	void *mem2;
	int status;

	/*
	 * As the ordering of the "raw", "query" and "reset" tests matter,
	 * they have been grouped together here. As they are for the most
	 * wrappers for the runtime stats routines, minimal testing is
	 * being done.
	 */

	/* Initial checks */

	test_mem_slab_raw("Initial", &raw);
	test_mem_slab_query("Initial", &query);

	/* Allocate 1st block */

	status = k_mem_slab_alloc(&mem_slab, &mem1, K_FOREVER);
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	/* One 32-byte block moved from free to allocated */
	raw.num_used++;
	query.free_bytes -= 32;
	query.allocated_bytes += 32;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	raw.max_used++;
	query.max_allocated_bytes += 32;
#endif
	test_mem_slab_raw("1st Alloc", &raw);
	test_mem_slab_query("1st Alloc", &query);

	/* Allocate 2nd block */

	status = k_mem_slab_alloc(&mem_slab, &mem2, K_FOREVER);
	zassert_equal(status, 0, "Expected 0, got %d\n", status);

	raw.num_used++;
	query.free_bytes -= 32;
	query.allocated_bytes += 32;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	raw.max_used++;
	query.max_allocated_bytes += 32;
#endif
	test_mem_slab_raw("2nd Alloc", &raw);
	test_mem_slab_query("2nd Alloc", &query);

	/* Free 1st block */
	k_mem_slab_free(&mem_slab, mem1);

	/* Current usage drops; high-water marks are unchanged by a free */
	raw.num_used--;
	query.free_bytes += 32;
	query.allocated_bytes -= 32;
	test_mem_slab_raw("Free 1st", &raw);
	test_mem_slab_query("Free 1st", &query);

	/* Reset the mem slab stats */
	status = k_obj_core_stats_reset(K_OBJ_CORE(&mem_slab));
	zassert_equal(status, 0, "Expected 0, got %d\n", status);
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	/* Reset collapses the high-water marks to the current usage */
	raw.max_used = raw.num_used;
	query.max_allocated_bytes = query.allocated_bytes;
#endif
	test_mem_slab_raw("Reset", &raw);
	test_mem_slab_query("Reset", &query);

	/* Cleanup - Free 2nd block */
	k_mem_slab_free(&mem_slab, mem2);
}
695
/* Register all four suites; each is pinned to a single CPU via the
 * simple_1cpu before/after fixtures.
 */
ZTEST_SUITE(obj_core_stats_system, NULL, NULL,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);

ZTEST_SUITE(obj_core_stats_thread, NULL, NULL,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);

ZTEST_SUITE(obj_core_stats_mem_block, NULL, NULL,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);

ZTEST_SUITE(obj_core_stats_mem_slab, NULL, NULL,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);
707