/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Thread Tests
 * @defgroup kernel_thread_tests Threads
 * @ingroup all_tests
 * @{
 * @}
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <string.h>

/* internal kernel APIs */
#include <ksched.h>
#include <kthread.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(test);

struct k_thread tdata;
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
size_t tstack_size = K_THREAD_STACK_SIZEOF(tstack);

/* local variables */
static K_THREAD_STACK_DEFINE(tstack_custom, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack_name, STACK_SIZE);
static struct k_thread tdata_custom;
static struct k_thread tdata_name;

static int main_prio;
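/* ZTEST_DMEM places 'tp' in the ztest data partition, so user-mode test
 * threads (e.g. in test_k_busy_wait_user below) can read and write it.
 */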
static ZTEST_DMEM int tp = 10;

/**
 * @ingroup kernel_thread_tests
 * @brief Verify main thread
 */
ZTEST(threads_lifecycle, test_systhreads_main)
{
        zassert_true(main_prio == CONFIG_MAIN_THREAD_PRIORITY, "%d", CONFIG_MAIN_THREAD_PRIORITY);
}

/**
 * @ingroup kernel_thread_tests
 * @brief Verify idle thread
 */
ZTEST(threads_lifecycle, test_systhreads_idle)
{
        k_msleep(100);
        /** TESTPOINT: the working thread's priority should be higher
         * (numerically lower) than the idle thread's
         */
        zassert_true(k_thread_priority_get(k_current_get()) <
                     K_IDLE_PRIO, NULL);
}

static void customdata_entry(void *p1, void *p2, void *p3)
{
        long data = 1U;

        zassert_is_null(k_thread_custom_data_get(), NULL);
        while (1) {
                k_thread_custom_data_set((void *)data);
                /* relinquish cpu for a while */
                k_msleep(50);
                /** TESTPOINT: custom data comparison */
                zassert_equal(data, (long)k_thread_custom_data_get());
                data++;
        }
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread custom data get/set from coop thread
 *
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST(threads_lifecycle_1cpu, test_customdata_get_set_coop)
{
        k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
                                      customdata_entry, NULL, NULL, NULL,
                                      K_PRIO_COOP(1), 0, K_NO_WAIT);

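        /* Let customdata_entry() run roughly ten of its 50 ms set/verify
         * iterations before tearing it down.
         */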
        k_msleep(500);

        /* cleanup environment */
        k_thread_abort(tid);
}

static void thread_name_entry(void *p1, void *p2, void *p3)
{
        /* Do nothing and exit */
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread name get/set from supervisor thread
 * @see k_thread_name_get(), k_thread_name_copy(), k_thread_name_set()
 */
ZTEST(threads_lifecycle, test_thread_name_get_set)
{
        int ret;
        const char *thread_name;
        char thread_buf[CONFIG_THREAD_MAX_NAME_LEN];

        /* Set and get current thread's name */
        ret = k_thread_name_set(NULL, "parent_thread");
        zassert_equal(ret, 0, "k_thread_name_set() failed");
        thread_name = k_thread_name_get(k_current_get());
        zassert_true(thread_name != NULL, "thread name was null");
        ret = strcmp(thread_name, "parent_thread");
        zassert_equal(ret, 0, "parent thread name does not match");

        /* Set and get child thread's name */
        k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
                                      thread_name_entry, NULL, NULL, NULL,
                                      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

        ret = k_thread_name_set(tid, "customdata");
        zassert_equal(ret, 0, "k_thread_name_set() failed");
        ret = k_thread_name_copy(tid, thread_buf, sizeof(thread_buf));
        zassert_equal(ret, 0, "couldn't get copied thread name");
        ret = strcmp(thread_buf, "customdata");
        zassert_equal(ret, 0, "child thread name does not match");

        /* cleanup environment */
        k_thread_abort(tid);
}

#ifdef CONFIG_USERSPACE
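/* Deliberately kept out of the ztest memory partition (no ZTEST_DMEM/BMEM),
 * so a user-mode thread has no read access and k_thread_name_set() on it
 * must fault.
 */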
static char unreadable_string[64];
static char not_my_buffer[CONFIG_THREAD_MAX_NAME_LEN];
struct k_sem sem;
#endif /* CONFIG_USERSPACE */

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread name get/set from user thread
 * @see k_thread_name_copy(), k_thread_name_set()
 */
ZTEST_USER(threads_lifecycle, test_thread_name_user_get_set)
{
#ifdef CONFIG_USERSPACE
        int ret;
        char thread_name[CONFIG_THREAD_MAX_NAME_LEN];
        char too_small[2];

        /* Some memory-related error cases for k_thread_name_set() */
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
        /* Non-Secure images cannot normally access memory outside the image
         * flash and ram.
         */
        ret = k_thread_name_set(NULL, (const char *)CONFIG_THREAD_API_UNMAPPED_ADDRESS);
        zassert_equal(ret, -EFAULT, "accepted nonsense string (%d)", ret);
#endif
        ret = k_thread_name_set(NULL, unreadable_string);
        zassert_equal(ret, -EFAULT, "accepted unreadable string");
        ret = k_thread_name_set((struct k_thread *)&sem, "some name");
        zassert_equal(ret, -EINVAL, "accepted non-thread object");
        ret = k_thread_name_set(&z_main_thread, "some name");
        zassert_equal(ret, -EINVAL, "no permission on thread object");

        /* Set and get current thread's name */
        ret = k_thread_name_set(NULL, "parent_thread");
        zassert_equal(ret, 0, "k_thread_name_set() failed");
        ret = k_thread_name_copy(k_current_get(), thread_name,
                                 sizeof(thread_name));
        zassert_equal(ret, 0, "k_thread_name_copy() failed");
        ret = strcmp(thread_name, "parent_thread");
        zassert_equal(ret, 0, "parent thread name does not match");

        /* memory-related cases for k_thread_name_get() */
        ret = k_thread_name_copy(k_current_get(), too_small,
                                 sizeof(too_small));
        zassert_equal(ret, -ENOSPC, "wrote to too-small buffer");
        ret = k_thread_name_copy(k_current_get(), not_my_buffer,
                                 sizeof(not_my_buffer));
        zassert_equal(ret, -EFAULT, "wrote to buffer without permission");
        ret = k_thread_name_copy((struct k_thread *)&sem, thread_name,
                                 sizeof(thread_name));
        zassert_equal(ret, -EINVAL, "not a thread object");
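        /* Unlike k_thread_name_set() above, copying another thread's name
         * does not require permissions on that thread object, so this
         * succeeds even for the main thread.
         */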
        ret = k_thread_name_copy(&z_main_thread, thread_name,
                                 sizeof(thread_name));
        zassert_equal(ret, 0, "couldn't get main thread name: %s (%d)", thread_name, ret);
        LOG_DBG("Main thread name is '%s'", thread_name);

        /* Set and get child thread's name */
        k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
                                      thread_name_entry, NULL, NULL, NULL,
                                      K_PRIO_PREEMPT(1), K_USER, K_NO_WAIT);
        ret = k_thread_name_set(tid, "customdata");
        zassert_equal(ret, 0, "k_thread_name_set() failed");
        ret = k_thread_name_copy(tid, thread_name, sizeof(thread_name));
        zassert_equal(ret, 0, "couldn't get copied thread name");
        ret = strcmp(thread_name, "customdata");
        zassert_equal(ret, 0, "child thread name does not match");

        /* cleanup environment */
        k_thread_abort(tid);
#else
        ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread custom data get/set from preempt thread
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST_USER(threads_lifecycle_1cpu, test_customdata_get_set_preempt)
{
        /** TESTPOINT: custom data of preempt thread */
        k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
                                      customdata_entry, NULL, NULL, NULL,
                                      K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);

        k_msleep(500);

        /* cleanup environment */
        k_thread_abort(tid);
}

static void umode_entry(void *thread_id, void *p2, void *p3)
{
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        if (!z_is_thread_essential(_current) &&
            (k_current_get() == (k_tid_t)thread_id)) {
                ztest_test_pass();
        } else {
                zassert_unreachable("User thread is essential or thread"
                                    " structure is corrupted\n");
        }
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test k_thread_user_mode_enter(), including coverage of the case
 * where userspace is not supported/enabled
 * @see k_thread_user_mode_enter()
 */
static void enter_user_mode_entry(void *p1, void *p2, void *p3)
{
        z_thread_essential_set(_current);

        zassert_true(z_is_thread_essential(_current), "Thread isn't set"
                     " as essential\n");

        k_thread_user_mode_enter(umode_entry,
                                 k_current_get(), NULL, NULL);
}

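/* Spawn enter_user_mode_entry() at the main thread's priority; once it has
 * dropped to user mode, umode_entry() passes the test from the user thread.
 */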
ZTEST_USER(threads_lifecycle, test_user_mode)
{
        k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
                                      enter_user_mode_entry, NULL, NULL,
                                      NULL, main_prio, K_INHERIT_PERMS, K_NO_WAIT);
        k_msleep(100);
        k_thread_abort(tid);
}

struct k_thread join_thread;
K_THREAD_STACK_DEFINE(join_stack, STACK_SIZE);

struct k_thread control_thread;
K_THREAD_STACK_DEFINE(control_stack, STACK_SIZE);

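/* Each control_method selects how the joined thread terminates (or keeps
 * running) relative to the joining context, including from an ISR.
 */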
enum control_method {
        TIMEOUT,
        NO_WAIT,
        SELF_ABORT,
        OTHER_ABORT,
        OTHER_ABORT_TIMEOUT,
        ALREADY_EXIT,
        ISR_ALREADY_EXIT,
        ISR_RUNNING
};

static void join_entry(void *p1, void *p2, void *p3)
{
        enum control_method m = (enum control_method)(intptr_t)p1;

        switch (m) {
        case TIMEOUT:
        case NO_WAIT:
        case OTHER_ABORT:
        case OTHER_ABORT_TIMEOUT:
        case ISR_RUNNING:
                LOG_DBG("join_thread: sleeping forever");
                k_sleep(K_FOREVER);
                break;
        case SELF_ABORT:
        case ALREADY_EXIT:
        case ISR_ALREADY_EXIT:
                LOG_DBG("join_thread: self-exiting");
                return;
        }
}

static void control_entry(void *p1, void *p2, void *p3)
{
        LOG_DBG("control_thread: killing join thread");
        k_thread_abort(&join_thread);
}

static void do_join_from_isr(const void *arg)
{
        int *ret = (int *)arg;

        zassert_true(k_is_in_isr());
        LOG_DBG("isr: joining join_thread");
        *ret = k_thread_join(&join_thread, K_NO_WAIT);
        LOG_DBG("isr: k_thread_join() returned with %d", *ret);
}

#define JOIN_TIMEOUT_MS 100

static int join_scenario_interval(enum control_method m, int64_t *interval)
{
        k_timeout_t timeout = K_FOREVER;
        int ret = 0;

        LOG_DBG("ztest_thread: method %d, create join_thread", m);
        k_thread_create(&join_thread, join_stack, STACK_SIZE, join_entry,
                        (void *)m, NULL, NULL, K_PRIO_PREEMPT(1),
                        K_USER | K_INHERIT_PERMS, K_NO_WAIT);

        switch (m) {
        case ALREADY_EXIT:
        case ISR_ALREADY_EXIT:
                /* Let join_thread run first */
                k_msleep(50);
                break;
        case OTHER_ABORT_TIMEOUT:
                timeout = K_MSEC(JOIN_TIMEOUT_MS);
                __fallthrough;
        case OTHER_ABORT:
                LOG_DBG("ztest_thread: create control_thread");
                k_thread_create(&control_thread, control_stack, STACK_SIZE,
                                control_entry, NULL, NULL, NULL,
                                K_PRIO_PREEMPT(2),
                                K_USER | K_INHERIT_PERMS, K_NO_WAIT);
                break;
        case TIMEOUT:
                timeout = K_MSEC(50);
                break;
        case NO_WAIT:
                timeout = K_NO_WAIT;
                break;
        default:
                break;
        }

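        /* Joins from ISR context must not block, which is why
         * do_join_from_isr() always uses K_NO_WAIT.
         */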
        if (m == ISR_ALREADY_EXIT || m == ISR_RUNNING) {
                irq_offload(do_join_from_isr, (const void *)&ret);
        } else {
                LOG_DBG("ztest_thread: joining join_thread");

                if (interval != NULL) {
                        *interval = k_uptime_get();
                }

                ret = k_thread_join(&join_thread, timeout);

                if (interval != NULL) {
                        *interval = k_uptime_get() - *interval;
                }

                LOG_DBG("ztest_thread: k_thread_join() returned with %d", ret);
        }

        if (ret != 0) {
                k_thread_abort(&join_thread);
        }
        if (m == OTHER_ABORT || m == OTHER_ABORT_TIMEOUT) {
                k_thread_join(&control_thread, K_FOREVER);
        }

        return ret;
}

static inline int join_scenario(enum control_method m)
{
        return join_scenario_interval(m, NULL);
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread join
 */
ZTEST_USER(threads_lifecycle, test_thread_join)
{
        int64_t interval;

#ifdef CONFIG_USERSPACE
        /* scenario: thread never started */
        zassert_equal(k_thread_join(&join_thread, K_FOREVER), 0,
                      "failed case thread never started");
#endif
        zassert_equal(join_scenario(TIMEOUT), -EAGAIN, "failed timeout case");
        zassert_equal(join_scenario(NO_WAIT), -EBUSY, "failed no-wait case");
        zassert_equal(join_scenario(SELF_ABORT), 0, "failed self-abort case");
        zassert_equal(join_scenario(OTHER_ABORT), 0, "failed other-abort case");

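        /* control_thread aborts join_thread almost immediately, so the join
         * should complete well before the JOIN_TIMEOUT_MS deadline.
         */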
        zassert_equal(join_scenario_interval(OTHER_ABORT_TIMEOUT, &interval),
                      0, "failed other-abort case with timeout");
        zassert_true(interval < JOIN_TIMEOUT_MS, "join took too long (%lld ms)",
                     interval);
        zassert_equal(join_scenario(ALREADY_EXIT), 0,
                      "failed already exit case");
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread join from ISR
 *
 * @see k_thread_join()
 * @see k_thread_abort()
 */
ZTEST(threads_lifecycle, test_thread_join_isr)
{
        zassert_equal(join_scenario(ISR_RUNNING), -EBUSY, "failed isr running");
        zassert_equal(join_scenario(ISR_ALREADY_EXIT), 0, "failed isr exited");
}

struct k_thread deadlock1_thread;
K_THREAD_STACK_DEFINE(deadlock1_stack, STACK_SIZE);

struct k_thread deadlock2_thread;
K_THREAD_STACK_DEFINE(deadlock2_stack, STACK_SIZE);

static void deadlock1_entry(void *p1, void *p2, void *p3)
{
        int ret;

        k_msleep(500);

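        /* By now deadlock2_thread is blocked joining this thread, so joining
         * it back must be detected as a deadlock.
         */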
        ret = k_thread_join(&deadlock2_thread, K_FOREVER);
        zassert_equal(ret, -EDEADLK, "failed mutual join case");
}

static void deadlock2_entry(void *p1, void *p2, void *p3)
{
        int ret;

        /* deadlock1_thread is active but currently sleeping */
        ret = k_thread_join(&deadlock1_thread, K_FOREVER);

        zassert_equal(ret, 0, "couldn't join deadlock1_thread");
}

/**
 * @brief Test case for thread join deadlock scenarios.
 *
 * This test verifies the behavior of the `k_thread_join` API in scenarios
 * that could lead to deadlocks. It includes the following checks:
 *
 * - Ensures that a thread cannot join itself, which would result in a
 *   self-deadlock. The API should return `-EDEADLK` in this case.
 * - Creates two threads (`deadlock1_thread` and `deadlock2_thread`) and
 *   verifies that they can be joined successfully without causing a deadlock.
 *
 * @ingroup kernel_thread_tests
 */
ZTEST_USER(threads_lifecycle, test_thread_join_deadlock)
{
        /* Deadlock scenarios */
        zassert_equal(k_thread_join(k_current_get(), K_FOREVER), -EDEADLK,
                      "failed self-deadlock case");

        k_thread_create(&deadlock1_thread, deadlock1_stack, STACK_SIZE,
                        deadlock1_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS, K_NO_WAIT);
        k_thread_create(&deadlock2_thread, deadlock2_stack, STACK_SIZE,
                        deadlock2_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS, K_NO_WAIT);

        zassert_equal(k_thread_join(&deadlock1_thread, K_FOREVER), 0,
                      "couldn't join deadlock1_thread");
        zassert_equal(k_thread_join(&deadlock2_thread, K_FOREVER), 0,
                      "couldn't join deadlock2_thread");
}

#define WAIT_TO_START_MS 100
/*
 * Entry for a delayed thread; it does nothing. After the thread is created,
 * the test checks how many ticks have expired and how many remain before
 * the thread starts.
 */
static void user_start_thread(void *p1, void *p2, void *p3)
{
        /* do nothing */
}
/**
 * @brief Test case for verifying thread timeout expiration and remaining time.
 *
 * @ingroup kernel_thread_tests
 */
ZTEST_USER(threads_lifecycle, test_thread_timeout_remaining_expires)
{
        k_ticks_t r, e, r1, ticks, expected_expires_ticks;

        ticks = k_ms_to_ticks_ceil32(WAIT_TO_START_MS);
        expected_expires_ticks = k_uptime_ticks() + ticks;

        k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
                                      user_start_thread, k_current_get(), NULL,
                                      NULL, 0, K_USER,
                                      K_MSEC(WAIT_TO_START_MS));

        k_msleep(10);
        e = k_thread_timeout_expires_ticks(tid);
        LOG_DBG("thread_expires_ticks: %d, expect: %d", (int)e,
                (int)expected_expires_ticks);
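        /* The absolute expiry tick can only land at or after the estimate,
         * since the estimate was sampled before k_thread_create().
         */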
        zassert_true(e >= expected_expires_ticks);

        k_msleep(10);
        r = k_thread_timeout_remaining_ticks(tid);
        zassert_true(r < ticks);
        r1 = r;

        k_msleep(10);
        r = k_thread_timeout_remaining_ticks(tid);
        zassert_true(r < r1);

        k_thread_abort(tid);
}

static void foreach_callback(const struct k_thread *thread, void *user_data)
{
        k_thread_runtime_stats_t stats;
        int ret;

        if (z_is_idle_thread_object((k_tid_t)thread)) {
                return;
        }

        /* Check NULL parameters */
        ret = k_thread_runtime_stats_get(NULL, &stats);
        zassert_true(ret == -EINVAL);
        ret = k_thread_runtime_stats_get((k_tid_t)thread, NULL);
        zassert_true(ret == -EINVAL);

        k_thread_runtime_stats_get((k_tid_t)thread, &stats);
        ((k_thread_runtime_stats_t *)user_data)->execution_cycles +=
                stats.execution_cycles;
}

/**
 * @brief Test case for thread runtime statistics retrieval in Zephyr kernel
 *
 * This case accumulates every thread's execution_cycles first, then
 * gets the total execution_cycles from a global
 * k_thread_runtime_stats_t to verify that all time is reflected in the
 * total.
 *
 * @ingroup kernel_thread_tests
 * @see k_thread_runtime_stats_get()
 */
ZTEST(threads_lifecycle, test_thread_runtime_stats_get)
{
        k_thread_runtime_stats_t stats, stats_all;
        int ret;

        stats.execution_cycles = 0;

        k_thread_foreach(foreach_callback, &stats);

        /* Check NULL parameters */
        ret = k_thread_runtime_stats_all_get(NULL);
        zassert_true(ret == -EINVAL);

        k_thread_runtime_stats_all_get(&stats_all);

        zassert_true(stats.execution_cycles <= stats_all.execution_cycles);
}

/**
 * @brief Test the behavior of k_busy_wait with thread runtime statistics.
 *
 * This test verifies the accuracy of the `k_busy_wait` function by checking
 * the thread's execution cycle statistics before and after calling the function.
 */
ZTEST(threads_lifecycle, test_k_busy_wait)
{
        uint64_t cycles, dt;
        k_thread_runtime_stats_t test_stats;

        k_thread_runtime_stats_get(k_current_get(), &test_stats);
        cycles = test_stats.execution_cycles;
        k_busy_wait(0);
        k_thread_runtime_stats_get(k_current_get(), &test_stats);

        /* execution_cycles shouldn't increase significantly after a
         * 0 usec wait (10 ms slop experimentally determined;
         * non-deterministic software emulators are VERY slow wrt
         * their cycle rate)
         */
        dt = test_stats.execution_cycles - cycles;
        zassert_true(dt < k_ms_to_cyc_ceil64(10));

        cycles = test_stats.execution_cycles;
        k_busy_wait(100);
        k_thread_runtime_stats_get(k_current_get(), &test_stats);

        /* execution_cycles increases correctly */
        dt = test_stats.execution_cycles - cycles;

        /* execution cycles may not increase by the full 100 µs as the
         * system may be doing something else during the busy
         * wait. Experimentally, we see at least 80% of the cycles
         * consumed in the busy wait loop on current test targets.
         */
        zassert_true(dt >= k_us_to_cyc_floor64(80));
}

static void tp_entry(void *p1, void *p2, void *p3)
{
        tp = 100;
}

/**
 * @brief Test the behavior of k_busy_wait with thread runtime statistics
 * in user mode.
 *
 * @ingroup kernel_thread_tests
 */
ZTEST_USER(threads_lifecycle_1cpu, test_k_busy_wait_user)
{
        k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
                                      tp_entry, NULL, NULL,
                                      NULL, 0, K_USER, K_NO_WAIT);
        k_busy_wait(1000);
        /* This is a 1cpu test case: the new thread has had no chance to be
         * scheduled, so the value of tp is unchanged.
         */
        zassert_false(tp == 100);

        /* Give up the cpu; the new thread will change the value of tp to 100. */
        k_msleep(100);
        zassert_true(tp == 100);
        k_thread_abort(tid);
}

#define INT_ARRAY_SIZE 128
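/* large_stack() burns INT_ARRAY_SIZE ints of stack with a volatile local
 * before sampling unused stack space; small_stack() samples with minimal
 * stack usage, so it should report at least as much free space.
 */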
static int large_stack(size_t *space)
{
        /* use "volatile" to protect this variable from being optimized out */
        volatile int a[INT_ARRAY_SIZE];

        /* to avoid an unused variable error */
        a[0] = 1;
        return k_thread_stack_space_get(k_current_get(), space);
}

static int small_stack(size_t *space)
{
        return k_thread_stack_space_get(k_current_get(), space);
}

/**
 * @brief Test k_thread_stack_space_get()
 *
 * Verify with k_thread_stack_space_get() that the unused stack space in
 * large_stack() is smaller than in small_stack(), because the former
 * function has a large local variable.
 *
 * @ingroup kernel_thread_tests
 */
ZTEST_USER(threads_lifecycle, test_k_thread_stack_space_get_user)
{
        size_t a, b;

        small_stack(&a);
        large_stack(&b);
        /* FIXME: Ideally, the following condition would assert true:
         * (a - b) == INT_ARRAY_SIZE * sizeof(int)
         * but it is not the case on native_sim, qemu_leon3 and
         * qemu_cortex_a53. Relax the check condition here.
         */
        zassert_true(b <= a);
}

static void *thread_test_setup(void)
{
        k_thread_access_grant(k_current_get(), &tdata, tstack,
                              &tdata_custom, tstack_custom,
                              &tdata_name, tstack_name,
                              &join_thread, join_stack,
                              &control_thread, control_stack,
                              &deadlock1_thread, deadlock1_stack,
                              &deadlock2_thread, deadlock2_stack);
        main_prio = k_thread_priority_get(k_current_get());
#ifdef CONFIG_USERSPACE
        strncpy(unreadable_string, "unreadable string",
                sizeof(unreadable_string));
#endif

        return NULL;
}

ZTEST_SUITE(threads_lifecycle, NULL, thread_test_setup, NULL, NULL, NULL);
ZTEST_SUITE(threads_lifecycle_1cpu, NULL, thread_test_setup,
            ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);