/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Thread Tests
 * @defgroup kernel_thread_tests Threads
 * @ingroup all_tests
 * @{
 * @}
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <string.h>

/* internal kernel APIs */
#include <ksched.h>
#include <kthread.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(test);

struct k_thread tdata;
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
size_t tstack_size = K_THREAD_STACK_SIZEOF(tstack);

/* local variables */
static K_THREAD_STACK_DEFINE(tstack_custom, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack_name, STACK_SIZE);
static struct k_thread tdata_custom;
static struct k_thread tdata_name;

static int main_prio;
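/* tp is ZTEST_DMEM so that user mode test threads are allowed to write it */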
static ZTEST_DMEM int tp = 10;

/**
 * @ingroup kernel_thread_tests
 * @brief Verify the main thread runs at CONFIG_MAIN_THREAD_PRIORITY
 */
ZTEST(threads_lifecycle, test_systhreads_main)
{
	zassert_true(main_prio == CONFIG_MAIN_THREAD_PRIORITY, "%d", CONFIG_MAIN_THREAD_PRIORITY);
}

/**
 * @ingroup kernel_thread_tests
 * @brief Verify the idle thread runs at the lowest priority
 */
ZTEST(threads_lifecycle, test_systhreads_idle)
{
	k_msleep(100);
	/** TESTPOINT: a working thread's priority should be numerically
	 * lower (i.e. higher priority) than the idle thread's
	 */
	zassert_true(k_thread_priority_get(k_current_get()) <
		     K_IDLE_PRIO, NULL);
}

static void customdata_entry(void *p1, void *p2, void *p3)
{
	long data = 1U;

	zassert_is_null(k_thread_custom_data_get(), NULL);
	while (1) {
		k_thread_custom_data_set((void *)data);
		/* relinquish cpu for a while */
		k_msleep(50);
		/** TESTPOINT: custom data comparison */
		zassert_equal(data, (long)k_thread_custom_data_get());
		data++;
	}
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread custom data get/set from a cooperative thread
 *
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST(threads_lifecycle_1cpu, test_customdata_get_set_coop)
{
	k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
				      customdata_entry, NULL, NULL, NULL,
				      K_PRIO_COOP(1), 0, K_NO_WAIT);
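
	/* Let customdata_entry run through several of its 50 ms
	 * set/get iterations before cleaning up
	 */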
	k_msleep(500);

	/* cleanup environment */
	k_thread_abort(tid);
}

static void thread_name_entry(void *p1, void *p2, void *p3)
{
	/* Do nothing and exit */
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread name get/set from a supervisor thread
 * @see k_thread_name_get(), k_thread_name_copy(), k_thread_name_set()
 */
ZTEST(threads_lifecycle, test_thread_name_get_set)
{
	int ret;
	const char *thread_name;
	char thread_buf[CONFIG_THREAD_MAX_NAME_LEN];

	/* Set and get current thread's name */
	ret = k_thread_name_set(NULL, "parent_thread");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	thread_name = k_thread_name_get(k_current_get());
	zassert_true(thread_name != NULL, "thread name was null");
	ret = strcmp(thread_name, "parent_thread");
	zassert_equal(ret, 0, "parent thread name does not match");

	/* Set and get child thread's name */
	k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
				      thread_name_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	ret = k_thread_name_set(tid, "customdata");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(tid, thread_buf, sizeof(thread_buf));
	zassert_equal(ret, 0, "couldn't get copied thread name");
	ret = strcmp(thread_buf, "customdata");
	zassert_equal(ret, 0, "child thread name does not match");

	/* cleanup environment */
	k_thread_abort(tid);
}

#ifdef CONFIG_USERSPACE
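/* Objects owned by the kernel: the user mode negative cases below expect
 * -EFAULT or -EINVAL when these are passed to the thread-name APIs.
 */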
static char unreadable_string[64];
static char not_my_buffer[CONFIG_THREAD_MAX_NAME_LEN];
struct k_sem sem;
#endif /* CONFIG_USERSPACE */

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread name get/set from a user thread
 * @see k_thread_name_copy(), k_thread_name_set()
 */
ZTEST_USER(threads_lifecycle, test_thread_name_user_get_set)
{
#ifdef CONFIG_USERSPACE
	int ret;
	char thread_name[CONFIG_THREAD_MAX_NAME_LEN];
	char too_small[2];

	/* Some memory-related error cases for k_thread_name_set() */
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
	/* Non-Secure images cannot normally access memory outside the image
	 * flash and ram.
	 */
	ret = k_thread_name_set(NULL, (const char *)0xFFFFFFF0);
	zassert_equal(ret, -EFAULT, "accepted nonsense string (%d)", ret);
#endif
	ret = k_thread_name_set(NULL, unreadable_string);
	zassert_equal(ret, -EFAULT, "accepted unreadable string");
	ret = k_thread_name_set((struct k_thread *)&sem, "some name");
	zassert_equal(ret, -EINVAL, "accepted non-thread object");
	ret = k_thread_name_set(&z_main_thread, "some name");
	zassert_equal(ret, -EINVAL, "no permission on thread object");

	/* Set and get current thread's name */
	ret = k_thread_name_set(NULL, "parent_thread");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(k_current_get(), thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, 0, "k_thread_name_copy() failed");
	ret = strcmp(thread_name, "parent_thread");
	zassert_equal(ret, 0, "parent thread name does not match");

	/* Memory-related error cases for k_thread_name_copy() */
	ret = k_thread_name_copy(k_current_get(), too_small,
				 sizeof(too_small));
	zassert_equal(ret, -ENOSPC, "wrote to too-small buffer");
	ret = k_thread_name_copy(k_current_get(), not_my_buffer,
				 sizeof(not_my_buffer));
	zassert_equal(ret, -EFAULT, "wrote to buffer without permission");
	ret = k_thread_name_copy((struct k_thread *)&sem, thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, -EINVAL, "not a thread object");
	ret = k_thread_name_copy(&z_main_thread, thread_name,
				 sizeof(thread_name));
	zassert_equal(ret, 0, "couldn't get main thread name: %s (%d)", thread_name, ret);
	LOG_DBG("Main thread name is '%s'", thread_name);

	/* Set and get child thread's name */
	k_tid_t tid = k_thread_create(&tdata_name, tstack_name, STACK_SIZE,
				      thread_name_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), K_USER, K_NO_WAIT);
	ret = k_thread_name_set(tid, "customdata");
	zassert_equal(ret, 0, "k_thread_name_set() failed");
	ret = k_thread_name_copy(tid, thread_name, sizeof(thread_name));
	zassert_equal(ret, 0, "couldn't get copied thread name");
	ret = strcmp(thread_name, "customdata");
	zassert_equal(ret, 0, "child thread name does not match");

	/* cleanup environment */
	k_thread_abort(tid);
#else
	ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test thread custom data get/set from a preemptible thread
 * @see k_thread_custom_data_get(), k_thread_custom_data_set()
 */
ZTEST_USER(threads_lifecycle_1cpu, test_customdata_get_set_preempt)
{
	/** TESTPOINT: custom data of preempt thread */
	k_tid_t tid = k_thread_create(&tdata_custom, tstack_custom, STACK_SIZE,
				      customdata_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);

	k_msleep(500);

	/* cleanup environment */
	k_thread_abort(tid);
}

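/* Runs in user mode after k_thread_user_mode_enter(); the drop to user
 * mode is expected to clear the essential flag set by the caller.
 */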
static void umode_entry(void *thread_id, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	if (!z_is_thread_essential(arch_current_thread()) &&
	    (k_current_get() == (k_tid_t)thread_id)) {
		ztest_test_pass();
	} else {
		zassert_unreachable("User thread is essential or thread"
				    " structure is corrupted\n");
	}
}

/**
 * @ingroup kernel_thread_tests
 * @brief Test k_thread_user_mode_enter() to cover the case when
 * userspace is not supported/enabled
 * @see k_thread_user_mode_enter()
 */
static void enter_user_mode_entry(void *p1, void *p2, void *p3)
{
	z_thread_essential_set(arch_current_thread());

	zassert_true(z_is_thread_essential(arch_current_thread()), "Thread isn't set"
		     " as essential\n");

	k_thread_user_mode_enter(umode_entry,
				 k_current_get(), NULL, NULL);
}

ZTEST_USER(threads_lifecycle, test_user_mode)
{
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      enter_user_mode_entry, NULL, NULL,
				      NULL, main_prio, K_INHERIT_PERMS, K_NO_WAIT);
	k_msleep(100);
	k_thread_abort(tid);
}

struct k_thread join_thread;
K_THREAD_STACK_DEFINE(join_stack, STACK_SIZE);

struct k_thread control_thread;
K_THREAD_STACK_DEFINE(control_stack, STACK_SIZE);

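/* Control methods for the k_thread_join() scenarios exercised below */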
enum control_method {
	TIMEOUT,
	NO_WAIT,
	SELF_ABORT,
	OTHER_ABORT,
	OTHER_ABORT_TIMEOUT,
	ALREADY_EXIT,
	ISR_ALREADY_EXIT,
	ISR_RUNNING
};

static void join_entry(void *p1, void *p2, void *p3)
{
	enum control_method m = (enum control_method)(intptr_t)p1;

	switch (m) {
	case TIMEOUT:
	case NO_WAIT:
	case OTHER_ABORT:
	case OTHER_ABORT_TIMEOUT:
	case ISR_RUNNING:
		LOG_DBG("join_thread: sleeping forever");
		k_sleep(K_FOREVER);
		break;
	case SELF_ABORT:
	case ALREADY_EXIT:
	case ISR_ALREADY_EXIT:
		LOG_DBG("join_thread: self-exiting");
		return;
	}
}

static void control_entry(void *p1, void *p2, void *p3)
{
	LOG_DBG("control_thread: killing join thread");
	k_thread_abort(&join_thread);
}

static void do_join_from_isr(const void *arg)
{
	int *ret = (int *)arg;

	zassert_true(k_is_in_isr());
	LOG_DBG("isr: joining join_thread");
	*ret = k_thread_join(&join_thread, K_NO_WAIT);
	LOG_DBG("isr: k_thread_join() returned with %d", *ret);
}

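/* Timeout for the OTHER_ABORT_TIMEOUT scenario; the join is expected to
 * complete well before this elapses.
 */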
#define JOIN_TIMEOUT_MS 100

static int join_scenario_interval(enum control_method m, int64_t *interval)
{
	k_timeout_t timeout = K_FOREVER;
	int ret;

	LOG_DBG("ztest_thread: method %d, create join_thread", m);
	k_thread_create(&join_thread, join_stack, STACK_SIZE, join_entry,
			(void *)m, NULL, NULL, K_PRIO_PREEMPT(1),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	switch (m) {
	case ALREADY_EXIT:
	case ISR_ALREADY_EXIT:
		/* Let join_thread run first */
		k_msleep(50);
		break;
	case OTHER_ABORT_TIMEOUT:
		timeout = K_MSEC(JOIN_TIMEOUT_MS);
		__fallthrough;
	case OTHER_ABORT:
		LOG_DBG("ztest_thread: create control_thread");
		k_thread_create(&control_thread, control_stack, STACK_SIZE,
				control_entry, NULL, NULL, NULL,
				K_PRIO_PREEMPT(2),
				K_USER | K_INHERIT_PERMS, K_NO_WAIT);
		break;
	case TIMEOUT:
		timeout = K_MSEC(50);
		break;
	case NO_WAIT:
		timeout = K_NO_WAIT;
		break;
	default:
		break;
	}

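	/* A join cannot block in interrupt context, so the ISR scenarios
	 * use K_NO_WAIT inside do_join_from_isr().
	 */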
	if (m == ISR_ALREADY_EXIT || m == ISR_RUNNING) {
		irq_offload(do_join_from_isr, (const void *)&ret);
	} else {
		LOG_DBG("ztest_thread: joining join_thread");

		if (interval != NULL) {
			*interval = k_uptime_get();
		}

		ret = k_thread_join(&join_thread, timeout);

		if (interval != NULL) {
			*interval = k_uptime_get() - *interval;
		}

		LOG_DBG("ztest_thread: k_thread_join() returned with %d", ret);
	}

	if (ret != 0) {
		k_thread_abort(&join_thread);
	}
	if (m == OTHER_ABORT || m == OTHER_ABORT_TIMEOUT) {
		k_thread_join(&control_thread, K_FOREVER);
	}

	return ret;
}

static inline int join_scenario(enum control_method m)
{
	return join_scenario_interval(m, NULL);
}

ZTEST_USER(threads_lifecycle, test_thread_join)
{
	int64_t interval;

#ifdef CONFIG_USERSPACE
	/* scenario: thread never started */
	zassert_equal(k_thread_join(&join_thread, K_FOREVER), 0,
		      "failed case thread never started");
#endif
	zassert_equal(join_scenario(TIMEOUT), -EAGAIN, "failed timeout case");
	zassert_equal(join_scenario(NO_WAIT), -EBUSY, "failed no-wait case");
	zassert_equal(join_scenario(SELF_ABORT), 0, "failed self-abort case");
	zassert_equal(join_scenario(OTHER_ABORT), 0, "failed other-abort case");

	zassert_equal(join_scenario_interval(OTHER_ABORT_TIMEOUT, &interval),
		      0, "failed other-abort case with timeout");
	zassert_true(interval < JOIN_TIMEOUT_MS, "join took too long (%lld ms)",
		     interval);
	zassert_equal(join_scenario(ALREADY_EXIT), 0,
		      "failed already exit case");
}

ZTEST(threads_lifecycle, test_thread_join_isr)
{
	zassert_equal(join_scenario(ISR_RUNNING), -EBUSY, "failed isr running");
	zassert_equal(join_scenario(ISR_ALREADY_EXIT), 0, "failed isr exited");
}

struct k_thread deadlock1_thread;
K_THREAD_STACK_DEFINE(deadlock1_stack, STACK_SIZE);

struct k_thread deadlock2_thread;
K_THREAD_STACK_DEFINE(deadlock2_stack, STACK_SIZE);

static void deadlock1_entry(void *p1, void *p2, void *p3)
{
	int ret;

	k_msleep(500);

	ret = k_thread_join(&deadlock2_thread, K_FOREVER);
	zassert_equal(ret, -EDEADLK, "failed mutual join case");
}

static void deadlock2_entry(void *p1, void *p2, void *p3)
{
	int ret;

	/* deadlock1_thread is active but currently sleeping */
	ret = k_thread_join(&deadlock1_thread, K_FOREVER);

	zassert_equal(ret, 0, "couldn't join deadlock1_thread");
}

ZTEST_USER(threads_lifecycle, test_thread_join_deadlock)
{
	/* Deadlock scenarios */
	zassert_equal(k_thread_join(k_current_get(), K_FOREVER), -EDEADLK,
		      "failed self-deadlock case");

	k_thread_create(&deadlock1_thread, deadlock1_stack, STACK_SIZE,
			deadlock1_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	k_thread_create(&deadlock2_thread, deadlock2_stack, STACK_SIZE,
			deadlock2_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	zassert_equal(k_thread_join(&deadlock1_thread, K_FOREVER), 0,
		      "couldn't join deadlock1_thread");
	zassert_equal(k_thread_join(&deadlock2_thread, K_FOREVER), 0,
		      "couldn't join deadlock2_thread");
}

#define WAIT_TO_START_MS 100
/*
 * Entry for a delayed thread; it does nothing. After the thread is
 * created, the test checks how many ticks have expired and how many
 * ticks remain before the thread starts.
 */
static void user_start_thread(void *p1, void *p2, void *p3)
{
	/* do nothing */
}

ZTEST_USER(threads_lifecycle, test_thread_timeout_remaining_expires)
{
	k_ticks_t r, e, r1, ticks, expected_expires_ticks;

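	/* Expected absolute expiry: current uptime plus the start delay,
	 * converted to ticks.
	 */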
	ticks = k_ms_to_ticks_ceil32(WAIT_TO_START_MS);
	expected_expires_ticks = k_uptime_ticks() + ticks;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      user_start_thread, k_current_get(), NULL,
				      NULL, 0, K_USER,
				      K_MSEC(WAIT_TO_START_MS));

	k_msleep(10);
	e = k_thread_timeout_expires_ticks(tid);
	LOG_DBG("thread_expires_ticks: %d, expect: %d", (int)e,
		(int)expected_expires_ticks);
	zassert_true(e >= expected_expires_ticks);

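	/* The remaining ticks should decrease monotonically while the
	 * start delay counts down.
	 */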
	k_msleep(10);
	r = k_thread_timeout_remaining_ticks(tid);
	zassert_true(r < ticks);
	r1 = r;

	k_msleep(10);
	r = k_thread_timeout_remaining_ticks(tid);
	zassert_true(r < r1);

	k_thread_abort(tid);
}

static void foreach_callback(const struct k_thread *thread, void *user_data)
{
	k_thread_runtime_stats_t stats;
	int ret;

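	/* Skip idle threads; only working threads' cycles are accumulated */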
	if (z_is_idle_thread_object((k_tid_t)thread)) {
		return;
	}

	/* Check NULL parameters */
	ret = k_thread_runtime_stats_get(NULL, &stats);
	zassert_true(ret == -EINVAL);
	ret = k_thread_runtime_stats_get((k_tid_t)thread, NULL);
	zassert_true(ret == -EINVAL);

	k_thread_runtime_stats_get((k_tid_t)thread, &stats);
	((k_thread_runtime_stats_t *)user_data)->execution_cycles +=
		stats.execution_cycles;
}

/* This case accumulates every thread's execution_cycles first, then
 * gets the total execution_cycles from a global
 * k_thread_runtime_stats_t to verify that all time is reflected in the
 * total.
 */
ZTEST(threads_lifecycle, test_thread_runtime_stats_get)
{
	k_thread_runtime_stats_t stats, stats_all;
	int ret;

	stats.execution_cycles = 0;

	k_thread_foreach(foreach_callback, &stats);

	/* Check NULL parameters */
	ret = k_thread_runtime_stats_all_get(NULL);
	zassert_true(ret == -EINVAL);

	k_thread_runtime_stats_all_get(&stats_all);

	zassert_true(stats.execution_cycles <= stats_all.execution_cycles);
}

ZTEST(threads_lifecycle, test_k_busy_wait)
{
	uint64_t cycles, dt;
	k_thread_runtime_stats_t test_stats;

	k_thread_runtime_stats_get(k_current_get(), &test_stats);
	cycles = test_stats.execution_cycles;
	k_busy_wait(0);
	k_thread_runtime_stats_get(k_current_get(), &test_stats);

	/* execution_cycles doesn't increase significantly after 0
	 * usec (10ms slop experimentally determined,
	 * non-deterministic software emulators are VERY slow wrt
	 * their cycle rate)
	 */
	dt = test_stats.execution_cycles - cycles;
	zassert_true(dt < k_ms_to_cyc_ceil64(10));

	cycles = test_stats.execution_cycles;
	k_busy_wait(100);
	k_thread_runtime_stats_get(k_current_get(), &test_stats);

	/* execution_cycles increases correctly */
	dt = test_stats.execution_cycles - cycles;

	/* execution cycles may not increase by the full 100µs as the
	 * system may be doing something else during the busy
	 * wait. Experimentally, we see at least 80% of the cycles
	 * consumed in the busy wait loop on current test targets.
	 */
	zassert_true(dt >= k_us_to_cyc_floor64(80));
}

static void tp_entry(void *p1, void *p2, void *p3)
{
	tp = 100;
}

ZTEST_USER(threads_lifecycle_1cpu, test_k_busy_wait_user)
{
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      tp_entry, NULL, NULL,
				      NULL, 0, K_USER, K_NO_WAIT);
	k_busy_wait(1000);
	/* This is a 1cpu test case: k_busy_wait() does not yield the CPU,
	 * so the new thread gets no chance to run and tp is unchanged.
	 */
	zassert_false(tp == 100);

	/* Give up the CPU; the new thread will change tp to 100 */
	k_msleep(100);
	zassert_true(tp == 100);
	k_thread_abort(tid);
}

#define INT_ARRAY_SIZE 128
static int large_stack(size_t *space)
{
	/* Use "volatile" to protect this variable from being optimized out */
	volatile int a[INT_ARRAY_SIZE];

	/* To avoid an unused variable error */
	a[0] = 1;
	return k_thread_stack_space_get(k_current_get(), space);
}

static int small_stack(size_t *space)
{
	return k_thread_stack_space_get(k_current_get(), space);
}

/* Test k_thread_stack_space_get(). The unused stack space in
 * large_stack() is smaller than that in small_stack() because the
 * former function has a large local variable.
 */
ZTEST_USER(threads_lifecycle, test_k_thread_stack_space_get_user)
{
	size_t a, b;

	small_stack(&a);
	large_stack(&b);
	/* FIXME: Ideally, the following condition would assert true:
	 * (a - b) == INT_ARRAY_SIZE * sizeof(int)
	 * but that is not the case on native_sim, qemu_leon3 and
	 * qemu_cortex_a53. Relax the check condition here.
	 */
	zassert_true(b <= a);
}

static void *thread_test_setup(void)
{
	k_thread_access_grant(k_current_get(), &tdata, tstack,
			      &tdata_custom, tstack_custom,
			      &tdata_name, tstack_name,
			      &join_thread, join_stack,
			      &control_thread, control_stack,
			      &deadlock1_thread, deadlock1_stack,
			      &deadlock2_thread, deadlock2_stack);
	main_prio = k_thread_priority_get(k_current_get());
#ifdef CONFIG_USERSPACE
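	/* Give unreadable_string contents; user threads still have no read
	 * permission on it, which the -EFAULT case in
	 * test_thread_name_user_get_set relies on.
	 */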
	strncpy(unreadable_string, "unreadable string",
		sizeof(unreadable_string));
#endif

	return NULL;
}

ZTEST_SUITE(threads_lifecycle, NULL, thread_test_setup, NULL, NULL, NULL);
ZTEST_SUITE(threads_lifecycle_1cpu, NULL, thread_test_setup,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);