Lines Matching +full:idle +full:- +full:mode
4 * SPDX-License-Identifier: Apache-2.0
36 *start_addr = (char *)k_current_get()->stack_info.start; in z_impl_stack_info_get()
37 *size = k_current_get()->stack_info.size; in z_impl_stack_info_get()
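For context, the two hits above come from a test-defined system call that copies the current thread's stack bounds out to the caller. A minimal kernel-mode sketch of the same lookup, assuming CONFIG_THREAD_STACK_INFO=y (the function name here is made up; user threads cannot dereference the thread struct directly, which is why the test wraps this in a custom syscall):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Kernel-mode sketch: read the current thread's stack buffer bounds.
 * Requires CONFIG_THREAD_STACK_INFO=y.
 */
static void print_my_stack_bounds(void)
{
	struct k_thread *me = k_current_get();

	printk("stack buffer starts at %p, size %zu\n",
	       (void *)me->stack_info.start, me->stack_info.size);
}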
123 printk(" - Thread reports buffer %p size %zu\n", stack_start, in stack_buffer_scenarios()
149 zassert_true(STEST_STACKSIZE <= (obj_size - reserved), in stack_buffer_scenarios()
166 printk(" - check read/write to stack buffer\n"); in stack_buffer_scenarios()
187 /* If we're in user mode, check every byte in the stack buffer in stack_buffer_scenarios()
201 zassert_true(check_perms(stack_start - 1, 1, 0), in stack_buffer_scenarios()
202 "user mode access to memory %p before start of stack object", in stack_buffer_scenarios()
203 obj_start - 1); in stack_buffer_scenarios()
205 "user mode access to memory %p past end of stack object", in stack_buffer_scenarios()
210 * when transitioning to user mode on RISC-V. Reinstate that in stack_buffer_scenarios()
212 * with a static non-zero K_THREAD_STACK_RESERVED definition. in stack_buffer_scenarios()
218 stack_size -= reserved; in stack_buffer_scenarios()
221 zassert_true(stack_size <= obj_size - reserved, in stack_buffer_scenarios()
227 carveout = stack_start - stack_buf; in stack_buffer_scenarios()
228 printk(" - Carved-out space in buffer: %zu\n", carveout); in stack_buffer_scenarios()
231 "Suspicious carve-out space reported"); in stack_buffer_scenarios()
233 end_space = obj_end - stack_end; in stack_buffer_scenarios()
234 printk(" - Unused objects space: %ld\n", end_space); in stack_buffer_scenarios()
248 * K_THREAD_STACK_LEN(X) - K_THREAD_STACK_RESERVED == in stack_buffer_scenarios()
252 * K_KERNEL_STACK_LEN(Y) - K_KERNEL_STACK_RESERVED == in stack_buffer_scenarios()
256 /* Not defined if user mode disabled, all stacks are kernel stacks */ in stack_buffer_scenarios()
264 adjusted -= reserved; in stack_buffer_scenarios()
268 /* For arrays there may be unused space per-object. This is because in stack_buffer_scenarios()
276 * We do not auto-expand usable space to cover this unused area. Doing in stack_buffer_scenarios()
282 * K_THREAD_STACK_LEN(X) - K_THREAD_STACK_RESERVED; in stack_buffer_scenarios()
290 adjusted -= reserved; in stack_buffer_scenarios()
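The surrounding comment explains that array elements may carry padding and reserved space beyond the declared size, and that this space is not auto-expanded into usable stack. A hedged illustration of that relationship, using made-up symbol names:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Illustrative only: compare an array element's declared size, its
 * actual stride (which may include alignment padding and reserved
 * space), and the usable size reported by K_THREAD_STACK_SIZEOF().
 */
K_THREAD_STACK_ARRAY_DEFINE(demo_stacks, 2, 1024);

static void print_stack_array_geometry(void)
{
	printk("declared size:  %d\n", 1024);
	printk("element stride: %zu\n", sizeof(demo_stacks[0]));
	printk("usable size:    %zu\n", K_THREAD_STACK_SIZEOF(demo_stacks[0]));
}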
304 expected = -ENOTSUP; in stack_buffer_scenarios()
311 printk("self-reported unused stack space: %zu\n", unused); in stack_buffer_scenarios()
335 -1, flags, K_FOREVER); in stest_thread_launch()
340 printk(" - Memory mapped stack object %p\n", scenario_data.stack_mapped); in stest_thread_launch()
349 if (ret == -EINVAL) { in stest_thread_launch()
372 metadata_size = zo->data.stack_data->size; in scenario_entry()
374 metadata_size = zo->data.stack_size; in scenario_entry()
394 printk(" - Testing supervisor mode\n"); in scenario_entry()
399 printk(" - Testing user mode (direct launch)\n"); in scenario_entry()
401 printk(" - Testing user mode (drop)\n"); in scenario_entry()
479 /* thread self-aborts, triggering idle thread cleanup */ in no_op_entry()
483 * @brief Show that the idle thread stack size is correct
485 * The idle thread has to occasionally clean up self-exiting threads.
495 * the idle stack may have been initialized on a in ZTEST()
503 /* 1cpu test case, so all other CPUs are spinning with co-op in ZTEST()
506 struct k_thread *idle = arch_curr_cpu()->idle_thread; in ZTEST() local
508 struct k_thread *idle = _current_cpu->idle_thread; in ZTEST() local
512 /* Spawn a child thread which self-exits */ in ZTEST()
516 -1, 0, K_NO_WAIT); in ZTEST()
520 /* Also sleep for a bit, which also exercises the idle thread in ZTEST()
525 /* Now measure idle thread stack usage */ in ZTEST()
526 ret = k_thread_stack_space_get(idle, &unused_bytes); in ZTEST()
528 zassert_true(unused_bytes > 0, "idle thread stack size %d too low", in ZTEST()
530 printk("unused idle thread stack size: %zu/%d (%zu used)\n", in ZTEST()
532 CONFIG_IDLE_STACK_SIZE - unused_bytes); in ZTEST()
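A condensed sketch of the pattern this test exercises: spawn a thread that returns immediately, join it, give the idle thread time to run its deferred cleanup, then inspect stack usage. All identifiers below are illustrative:

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(child_stack, 1024);
static struct k_thread child_thread;

static void no_op_child(void *p1, void *p2, void *p3)
{
	/* returning from the entry point makes the thread self-exit */
}

static void exercise_idle_cleanup(void)
{
	k_thread_create(&child_thread, child_stack,
			K_THREAD_STACK_SIZEOF(child_stack),
			no_op_child, NULL, NULL, NULL,
			-1, 0, K_NO_WAIT);
	k_thread_join(&child_thread, K_FOREVER);
	k_msleep(100);

	/* At this point k_thread_stack_space_get() can be called on the
	 * idle thread, as the test above does, to see how much of
	 * CONFIG_IDLE_STACK_SIZE was actually consumed.
	 */
}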