Lines Matching full:thread

33 /* Storage to "complete" the context switch from an invalid/incomplete thread
39 static void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
50 * > 0 -> thread 1 priority > thread 2 priority
51 * = 0 -> thread 1 priority == thread 2 priority
52 * < 0 -> thread 1 priority < thread 2 priority
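
The three lines above (source lines 50-52) document the return convention of the scheduler's priority comparator. Below is a minimal sketch of a comparator that follows the same convention; prio_cmp_sketch is a hypothetical name, not the kernel's z_sched_prio_cmp(), and it ignores the deadline tie-break the real comparator applies under CONFIG_SCHED_DEADLINE. Recall that in Zephyr a numerically lower prio value means a higher scheduling priority.

#include <zephyr/kernel.h>

/* Hypothetical comparator following the documented convention; not the
 * kernel's z_sched_prio_cmp(). Lower numeric prio == higher priority, so
 * "thread 1 priority > thread 2 priority" means t1->base.prio < t2->base.prio.
 */
static int prio_cmp_sketch(const struct k_thread *t1, const struct k_thread *t2)
{
	return (int)t2->base.prio - (int)t1->base.prio; /* >0, ==0, <0 as above */
}
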
90 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread) in thread_runq() argument
93 int cpu, m = thread->base.cpu_mask; in thread_runq()
96 * thread with all CPUs masked off (i.e. one that isn't in thread_runq()
105 ARG_UNUSED(thread); in thread_runq()
119 static ALWAYS_INLINE void runq_add(struct k_thread *thread) in runq_add() argument
121 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in runq_add()
123 _priq_run_add(thread_runq(thread), thread); in runq_add()
126 static ALWAYS_INLINE void runq_remove(struct k_thread *thread) in runq_remove() argument
128 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in runq_remove()
130 _priq_run_remove(thread_runq(thread), thread); in runq_remove()
141 static inline bool should_queue_thread(struct k_thread *thread) in should_queue_thread() argument
143 return !IS_ENABLED(CONFIG_SMP) || (thread != _current); in should_queue_thread()
146 static ALWAYS_INLINE void queue_thread(struct k_thread *thread) in queue_thread() argument
148 thread->base.thread_state |= _THREAD_QUEUED; in queue_thread()
149 if (should_queue_thread(thread)) { in queue_thread()
150 runq_add(thread); in queue_thread()
153 if (thread == _current) { in queue_thread()
160 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread) in dequeue_thread() argument
162 thread->base.thread_state &= ~_THREAD_QUEUED; in dequeue_thread()
163 if (should_queue_thread(thread)) { in dequeue_thread()
164 runq_remove(thread); in dequeue_thread()
168 /* Called out of z_swap() when CONFIG_SMP. The current thread can
174 void z_requeue_current(struct k_thread *thread) in z_requeue_current() argument
176 if (z_is_thread_queued(thread)) { in z_requeue_current()
177 runq_add(thread); in z_requeue_current()
182 /* Return true if the thread is aborting, else false */
183 static inline bool is_aborting(struct k_thread *thread) in is_aborting() argument
185 return (thread->base.thread_state & _THREAD_ABORTING) != 0U; in is_aborting()
188 /* Return true if the thread is aborting or suspending, else false */
189 static inline bool is_halting(struct k_thread *thread) in is_halting() argument
191 return (thread->base.thread_state & in is_halting()
196 static inline void clear_halting(struct k_thread *thread) in clear_halting() argument
199 thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING); in clear_halting()
211 struct k_thread *thread = runq_best(); in next_up() local
216 * cooperative thread they preempted and not whatever happens in next_up()
217 * to be highest priority now. The cooperative thread was in next_up()
222 if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) { in next_up()
224 thread = mirqp; in next_up()
235 /* In uniprocessor mode, we can leave the current thread in in next_up()
241 return (thread != NULL) ? thread : _current_cpu->idle_thread; in next_up()
244 * thread doesn't work, so we have more work to do to test in next_up()
246 * thread selected above represents "the best thread that is in next_up()
257 if (thread == NULL) { in next_up()
258 thread = _current_cpu->idle_thread; in next_up()
262 int32_t cmp = z_sched_prio_cmp(_current, thread); in next_up()
266 thread = _current; in next_up()
269 if (!should_preempt(thread, _current_cpu->swap_ok)) { in next_up()
270 thread = _current; in next_up()
275 if ((thread != _current) && active && in next_up()
281 if (z_is_thread_queued(thread)) { in next_up()
282 dequeue_thread(thread); in next_up()
286 return thread; in next_up()
290 void move_thread_to_end_of_prio_q(struct k_thread *thread) in move_thread_to_end_of_prio_q() argument
292 if (z_is_thread_queued(thread)) { in move_thread_to_end_of_prio_q()
293 dequeue_thread(thread); in move_thread_to_end_of_prio_q()
295 queue_thread(thread); in move_thread_to_end_of_prio_q()
296 update_cache(thread == _current); in move_thread_to_end_of_prio_q()
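
move_thread_to_end_of_prio_q() is the requeue-behind-peers operation; from application code the same effect for the current thread is obtained with k_yield(). A minimal usage sketch, where the function name and the loop body are illustrative:

#include <zephyr/kernel.h>

/* Cooperative round-robin among equal-priority threads: k_yield() places
 * the calling thread at the back of its priority level so that a ready
 * peer of the same priority can run next.
 */
static void spinner_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		/* ... one unit of work ... */
		k_yield();
	}
}
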
300 * them specifically. Called at the moment a new thread has been
303 static void update_metairq_preempt(struct k_thread *thread) in update_metairq_preempt() argument
307 if (thread_is_metairq(thread) && !thread_is_metairq(_current) && in update_metairq_preempt()
311 } else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) { in update_metairq_preempt()
316 ARG_UNUSED(thread); in update_metairq_preempt()
326 struct k_thread *thread = next_up(); in update_cache() local
328 if (should_preempt(thread, preempt_ok)) { in update_cache()
330 if (thread != _current) { in update_cache()
331 z_reset_time_slice(thread); in update_cache()
334 update_metairq_preempt(thread); in update_cache()
335 _kernel.ready_q.cache = thread; in update_cache()
344 * thread because if the thread gets preempted for whatever in update_cache()
351 static struct _cpu *thread_active_elsewhere(struct k_thread *thread) in thread_active_elsewhere() argument
353 /* Returns pointer to _cpu if the thread is currently running on in thread_active_elsewhere()
364 (_kernel.cpus[i].current == thread)) { in thread_active_elsewhere()
369 ARG_UNUSED(thread); in thread_active_elsewhere()
373 static void ready_thread(struct k_thread *thread) in ready_thread() argument
376 __ASSERT_NO_MSG(arch_mem_coherent(thread)); in ready_thread()
379 /* If thread is queued already, do not try to add it to the in ready_thread()
382 if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) { in ready_thread()
383 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread); in ready_thread()
385 queue_thread(thread); in ready_thread()
388 flag_ipi(ipi_mask_create(thread)); in ready_thread()
392 void z_ready_thread_locked(struct k_thread *thread) in z_ready_thread_locked() argument
394 if (thread_active_elsewhere(thread) == NULL) { in z_ready_thread_locked()
395 ready_thread(thread); in z_ready_thread_locked()
399 void z_ready_thread(struct k_thread *thread) in z_ready_thread() argument
402 if (thread_active_elsewhere(thread) == NULL) { in z_ready_thread()
403 ready_thread(thread); in z_ready_thread()
408 void z_move_thread_to_end_of_prio_q(struct k_thread *thread) in z_move_thread_to_end_of_prio_q() argument
411 move_thread_to_end_of_prio_q(thread); in z_move_thread_to_end_of_prio_q()
415 void z_sched_start(struct k_thread *thread) in z_sched_start() argument
419 if (z_has_thread_started(thread)) { in z_sched_start()
424 z_mark_thread_as_started(thread); in z_sched_start()
425 ready_thread(thread); in z_sched_start()
429 /* Spins in ISR context, waiting for a thread known to be running on
435 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key) in thread_halt_spin() argument
442 while (is_halting(thread)) { in thread_halt_spin()
456 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key, in z_thread_halt() argument
459 _wait_q_t *wq = &thread->join_queue; in z_thread_halt()
461 wq = terminate ? wq : &thread->halt_queue; in z_thread_halt()
464 /* If the target is a thread running on another CPU, flag and in z_thread_halt()
471 struct _cpu *cpu = thread_active_elsewhere(thread); in z_thread_halt()
474 thread->base.thread_state |= (terminate ? _THREAD_ABORTING in z_thread_halt()
484 thread_halt_spin(thread, key); in z_thread_halt()
490 halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED); in z_thread_halt()
491 if ((thread == _current) && !arch_is_in_isr()) { in z_thread_halt()
505 void z_impl_k_thread_suspend(k_tid_t thread) in z_impl_k_thread_suspend() argument
507 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread); in z_impl_k_thread_suspend()
509 (void)z_abort_thread_timeout(thread); in z_impl_k_thread_suspend()
513 if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) { in z_impl_k_thread_suspend()
515 /* The target thread is already suspended. Nothing to do. */ in z_impl_k_thread_suspend()
521 z_thread_halt(thread, key, false); in z_impl_k_thread_suspend()
523 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread); in z_impl_k_thread_suspend()
527 static inline void z_vrfy_k_thread_suspend(k_tid_t thread) in z_vrfy_k_thread_suspend() argument
529 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_suspend()
530 z_impl_k_thread_suspend(thread); in z_vrfy_k_thread_suspend()
535 void z_impl_k_thread_resume(k_tid_t thread) in z_impl_k_thread_resume() argument
537 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread); in z_impl_k_thread_resume()
541 /* Do not try to resume a thread that was not suspended */ in z_impl_k_thread_resume()
542 if (!z_is_thread_suspended(thread)) { in z_impl_k_thread_resume()
547 z_mark_thread_as_not_suspended(thread); in z_impl_k_thread_resume()
548 ready_thread(thread); in z_impl_k_thread_resume()
552 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread); in z_impl_k_thread_resume()
556 static inline void z_vrfy_k_thread_resume(k_tid_t thread) in z_vrfy_k_thread_resume() argument
558 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_resume()
559 z_impl_k_thread_resume(thread); in z_vrfy_k_thread_resume()
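
z_impl_k_thread_suspend() and z_impl_k_thread_resume() back the public k_thread_suspend() and k_thread_resume() calls. A minimal usage sketch follows; the stack size, priority and delay values are illustrative:

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(periodic_stack, 1024);
static struct k_thread periodic_thread;

static void periodic_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	for (;;) {
		/* ... periodic work ... */
		k_sleep(K_MSEC(100));
	}
}

static void suspend_resume_demo(void)
{
	k_tid_t tid = k_thread_create(&periodic_thread, periodic_stack,
				      K_THREAD_STACK_SIZEOF(periodic_stack),
				      periodic_fn, NULL, NULL, NULL,
				      5, 0, K_NO_WAIT);

	k_sleep(K_MSEC(500));
	k_thread_suspend(tid); /* takes the halt path shown above (terminate=false) */
	k_sleep(K_MSEC(500));
	k_thread_resume(tid);  /* marks it not-suspended and re-readies it */
}
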
564 static _wait_q_t *pended_on_thread(struct k_thread *thread) in pended_on_thread() argument
566 __ASSERT_NO_MSG(thread->base.pended_on); in pended_on_thread()
568 return thread->base.pended_on; in pended_on_thread()
571 static void unready_thread(struct k_thread *thread) in unready_thread() argument
573 if (z_is_thread_queued(thread)) { in unready_thread()
574 dequeue_thread(thread); in unready_thread()
576 update_cache(thread == _current); in unready_thread()
580 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q) in add_to_waitq_locked() argument
582 unready_thread(thread); in add_to_waitq_locked()
583 z_mark_thread_as_pending(thread); in add_to_waitq_locked()
585 SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread); in add_to_waitq_locked()
588 thread->base.pended_on = wait_q; in add_to_waitq_locked()
589 _priq_wait_add(&wait_q->waitq, thread); in add_to_waitq_locked()
593 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout) in add_thread_timeout() argument
596 z_add_thread_timeout(thread, timeout); in add_thread_timeout()
600 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q, in pend_locked() argument
606 add_to_waitq_locked(thread, wait_q); in pend_locked()
607 add_thread_timeout(thread, timeout); in pend_locked()
610 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, in z_pend_thread() argument
613 __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread)); in z_pend_thread()
615 pend_locked(thread, wait_q, timeout); in z_pend_thread()
619 static inline void unpend_thread_no_timeout(struct k_thread *thread) in unpend_thread_no_timeout() argument
621 _priq_wait_remove(&pended_on_thread(thread)->waitq, thread); in unpend_thread_no_timeout()
622 z_mark_thread_as_not_pending(thread); in unpend_thread_no_timeout()
623 thread->base.pended_on = NULL; in unpend_thread_no_timeout()
626 void z_unpend_thread_no_timeout(struct k_thread *thread) in z_unpend_thread_no_timeout() argument
629 if (thread->base.pended_on != NULL) { in z_unpend_thread_no_timeout()
630 unpend_thread_no_timeout(thread); in z_unpend_thread_no_timeout()
635 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout) in z_sched_wake_thread() argument
638 bool killed = (thread->base.thread_state & in z_sched_wake_thread()
642 bool do_nothing = thread->no_wake_on_timeout && is_timeout; in z_sched_wake_thread()
644 thread->no_wake_on_timeout = false; in z_sched_wake_thread()
652 /* The thread is not being killed */ in z_sched_wake_thread()
653 if (thread->base.pended_on != NULL) { in z_sched_wake_thread()
654 unpend_thread_no_timeout(thread); in z_sched_wake_thread()
656 z_mark_thread_as_started(thread); in z_sched_wake_thread()
658 z_mark_thread_as_not_suspended(thread); in z_sched_wake_thread()
660 ready_thread(thread); in z_sched_wake_thread()
670 struct k_thread *thread = CONTAINER_OF(timeout, in z_thread_timeout() local
673 z_sched_wake_thread(thread, true); in z_thread_timeout()
701 struct k_thread *thread = NULL; in z_unpend1_no_timeout() local
704 thread = _priq_wait_best(&wait_q->waitq); in z_unpend1_no_timeout()
706 if (thread != NULL) { in z_unpend1_no_timeout()
707 unpend_thread_no_timeout(thread); in z_unpend1_no_timeout()
711 return thread; in z_unpend1_no_timeout()
716 struct k_thread *thread = NULL; in z_unpend_first_thread() local
719 thread = _priq_wait_best(&wait_q->waitq); in z_unpend_first_thread()
721 if (thread != NULL) { in z_unpend_first_thread()
722 unpend_thread_no_timeout(thread); in z_unpend_first_thread()
723 (void)z_abort_thread_timeout(thread); in z_unpend_first_thread()
727 return thread; in z_unpend_first_thread()
730 void z_unpend_thread(struct k_thread *thread) in z_unpend_thread() argument
732 z_unpend_thread_no_timeout(thread); in z_unpend_thread()
733 (void)z_abort_thread_timeout(thread); in z_unpend_thread()
739 bool z_thread_prio_set(struct k_thread *thread, int prio) in z_thread_prio_set() argument
742 int old_prio = thread->base.prio; in z_thread_prio_set()
745 need_sched = z_is_thread_ready(thread); in z_thread_prio_set()
748 if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) { in z_thread_prio_set()
749 dequeue_thread(thread); in z_thread_prio_set()
750 thread->base.prio = prio; in z_thread_prio_set()
751 queue_thread(thread); in z_thread_prio_set()
754 flag_ipi(ipi_mask_create(thread)); in z_thread_prio_set()
758 * This is a running thread on SMP. Update its in z_thread_prio_set()
764 thread->base.prio = prio; in z_thread_prio_set()
768 cpu = thread_active_elsewhere(thread); in z_thread_prio_set()
776 thread->base.prio = prio; in z_thread_prio_set()
780 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio); in z_thread_prio_set()
795 * Check if the next ready thread is the same as the current thread
806 /* Check if the next ready thread is the same as the current thread */ in need_swap()
886 * @brief Determine next thread to execute upon completion of an interrupt
888 * Thread preemption is performed by context switching after the completion
889 * of a non-recursed interrupt. This function determines which thread to
892 * - The handle for the interrupted thread in which case the thread's context
895 * - NULL if more work is required to fully save the thread's state after
896 * it is known that a new thread is to be scheduled. It is up to the caller
897 * to store the handle resulting from the thread that is being switched out
898 * in that thread's "switch_handle" field after its
902 * If a new thread needs to be scheduled then its handle is returned.
909 * to the interrupted thread anymore. It might be necessary to make a local
912 * @param interrupted Handle for the thread that was interrupted or NULL.
913 * @retval Handle for the next thread to execute, or @p interrupted when
914 * no new thread is to be scheduled.
952 * confused when the "wrong" thread tries to in z_get_next_switch_handle()
958 /* A queued (runnable) old/current thread in z_get_next_switch_handle()
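
The doc comment above (source lines 886-914) describes the contract the architecture's interrupt-exit code follows when calling z_get_next_switch_handle(). The sketch below is only a rough illustration of that contract for a CONFIG_USE_SWITCH-style port, not a real port and not self-contained: register save/restore and the handling of a NULL return (storing the outgoing thread's handle in its switch_handle field once its state is saved) are arch-specific and omitted.

/* Illustrative only: how an arch interrupt-exit path might consume the
 * documented contract. 'interrupted' is the handle of the thread that was
 * running when the interrupt fired.
 */
static void *interrupt_exit_sketch(void *interrupted)
{
	void *next = z_get_next_switch_handle(interrupted);

	if (next != interrupted) {
		/* A different thread was selected: restore its context here.
		 * (If NULL were returned, the outgoing thread's handle must
		 * still be written to its switch_handle field after its state
		 * is fully saved, as described above.)
		 */
	}
	return next;
}
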
995 struct k_thread *thread; in z_unpend_all() local
997 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) { in z_unpend_all()
998 z_unpend_thread(thread); in z_unpend_all()
999 z_ready_thread(thread); in z_unpend_all()
1034 void z_impl_k_thread_priority_set(k_tid_t thread, int prio) in z_impl_k_thread_priority_set() argument
1042 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio); in z_impl_k_thread_priority_set()
1051 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) in z_vrfy_k_thread_priority_set() argument
1053 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_priority_set()
1055 "invalid thread priority %d", prio)); in z_vrfy_k_thread_priority_set()
1057 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, in z_vrfy_k_thread_priority_set()
1058 "thread priority may only be downgraded (%d < %d)", in z_vrfy_k_thread_priority_set()
1059 prio, thread->base.prio)); in z_vrfy_k_thread_priority_set()
1061 z_impl_k_thread_priority_set(thread, prio); in z_vrfy_k_thread_priority_set()
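
z_thread_prio_set() backs the public k_thread_priority_set(); the verification handler above additionally enforces that a user-mode caller may only lower a thread's priority (move it to a numerically greater value). A small sketch, with the +1 step chosen purely for illustration:

#include <zephyr/kernel.h>

/* Drop a thread's priority by one level at runtime. Zephyr priorities are
 * "lower number = more urgent", and from user mode the syscall verifier
 * above only permits moves to numerically greater (less urgent) values.
 */
static void deprioritize(k_tid_t tid)
{
	int old_prio = k_thread_priority_get(tid);

	k_thread_priority_set(tid, old_prio + 1);
}
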
1072 struct k_thread *thread = tid; in z_impl_k_thread_deadline_set() local
1076 * change it while the thread is in the run queue (dlists in z_impl_k_thread_deadline_set()
1082 if (z_is_thread_queued(thread)) { in z_impl_k_thread_deadline_set()
1083 dequeue_thread(thread); in z_impl_k_thread_deadline_set()
1084 thread->base.prio_deadline = newdl; in z_impl_k_thread_deadline_set()
1085 queue_thread(thread); in z_impl_k_thread_deadline_set()
1087 thread->base.prio_deadline = newdl; in z_impl_k_thread_deadline_set()
1095 struct k_thread *thread = tid; in z_vrfy_k_thread_deadline_set() local
1097 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_deadline_set()
1099 "invalid thread deadline %d", in z_vrfy_k_thread_deadline_set()
1102 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); in z_vrfy_k_thread_deadline_set()
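
z_impl_k_thread_deadline_set() backs k_thread_deadline_set(), which is only meaningful with CONFIG_SCHED_DEADLINE enabled; the deadline is expressed in hardware cycles relative to the current time. A sketch, with the 10 ms figure purely illustrative:

#include <zephyr/kernel.h>

/* Ask the EDF scheduler to order the current thread ahead of same-priority
 * peers whose deadlines lie further in the future (CONFIG_SCHED_DEADLINE).
 */
static void set_my_deadline(void)
{
	k_thread_deadline_set(k_current_get(), (int)k_ms_to_cyc_ceil32(10));
}
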
1145 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); in z_tick_sleep()
1241 void z_impl_k_wakeup(k_tid_t thread) in z_impl_k_wakeup() argument
1243 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread); in z_impl_k_wakeup()
1245 if (z_is_thread_pending(thread)) { in z_impl_k_wakeup()
1249 if (z_abort_thread_timeout(thread) < 0) { in z_impl_k_wakeup()
1251 if (thread->base.thread_state != _THREAD_SUSPENDED) { in z_impl_k_wakeup()
1258 z_mark_thread_as_not_suspended(thread); in z_impl_k_wakeup()
1260 if (thread_active_elsewhere(thread) == NULL) { in z_impl_k_wakeup()
1261 ready_thread(thread); in z_impl_k_wakeup()
1272 static inline void z_vrfy_k_wakeup(k_tid_t thread) in z_vrfy_k_wakeup() argument
1274 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_wakeup()
1275 z_impl_k_wakeup(thread); in z_vrfy_k_wakeup()
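
z_impl_k_wakeup() backs k_wakeup(): it aborts a sleeping thread's timeout and re-readies it, and, per the checks above, does nothing for threads that are pending on an object. A minimal sketch; stack size, priority and delay values are illustrative:

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(sleeper_stack, 1024);
static struct k_thread sleeper_thread;

static void sleeper_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	/* Sleep with no timeout; another thread must k_wakeup() us. */
	k_sleep(K_FOREVER);
	/* ... continue after being woken ... */
}

static void wakeup_demo(void)
{
	k_tid_t tid = k_thread_create(&sleeper_thread, sleeper_stack,
				      K_THREAD_STACK_SIZEOF(sleeper_stack),
				      sleeper_fn, NULL, NULL, NULL,
				      5, 0, K_NO_WAIT);

	k_sleep(K_MSEC(10));
	k_wakeup(tid); /* aborts the sleep and re-readies the sleeper */
}
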
1308 struct k_thread *thread; in unpend_all() local
1310 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) { in unpend_all()
1311 unpend_thread_no_timeout(thread); in unpend_all()
1312 (void)z_abort_thread_timeout(thread); in unpend_all()
1313 arch_thread_return_value_set(thread, 0); in unpend_all()
1314 ready_thread(thread); in unpend_all()
1319 extern void thread_abort_hook(struct k_thread *thread);
1323 * @brief Dequeues the specified thread
1325 * Dequeues the specified thread and moves it into the specified new state.
1327 * @param thread Identify the thread to halt
1328 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1330 static void halt_thread(struct k_thread *thread, uint8_t new_state) in halt_thread() argument
1334 /* We hold the lock, and the thread is known not to be running in halt_thread()
1337 if ((thread->base.thread_state & new_state) == 0U) { in halt_thread()
1338 thread->base.thread_state |= new_state; in halt_thread()
1339 if (z_is_thread_queued(thread)) { in halt_thread()
1340 dequeue_thread(thread); in halt_thread()
1344 if (thread->base.pended_on != NULL) { in halt_thread()
1345 unpend_thread_no_timeout(thread); in halt_thread()
1347 (void)z_abort_thread_timeout(thread); in halt_thread()
1348 unpend_all(&thread->join_queue); in halt_thread()
1356 if (thread == _current && arch_is_in_isr()) { in halt_thread()
1361 unpend_all(&thread->halt_queue); in halt_thread()
1366 clear_halting(thread); in halt_thread()
1371 arch_float_disable(thread); in halt_thread()
1374 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); in halt_thread()
1376 z_thread_monitor_exit(thread); in halt_thread()
1378 thread_abort_hook(thread); in halt_thread()
1383 k_obj_core_stats_deregister(K_OBJ_CORE(thread)); in halt_thread()
1385 k_obj_core_unlink(K_OBJ_CORE(thread)); in halt_thread()
1389 z_mem_domain_exit_thread(thread); in halt_thread()
1390 k_thread_perms_all_clear(thread); in halt_thread()
1391 k_object_uninit(thread->stack_obj); in halt_thread()
1392 k_object_uninit(thread); in halt_thread()
1396 k_thread_abort_cleanup(thread); in halt_thread()
1405 * never be used, as our thread is flagged dead, but in halt_thread()
1416 /* Finally update the halting thread state, on which in halt_thread()
1420 clear_halting(thread); in halt_thread()
1424 void z_thread_abort(struct k_thread *thread) in z_thread_abort() argument
1428 if (z_is_thread_essential(thread)) { in z_thread_abort()
1430 __ASSERT(false, "aborting essential thread %p", thread); in z_thread_abort()
1435 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_thread_abort()
1440 z_thread_halt(thread, key, true); in z_thread_abort()
1444 void z_impl_k_thread_abort(k_tid_t thread) in z_impl_k_thread_abort() argument
1446 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); in z_impl_k_thread_abort()
1448 z_thread_abort(thread); in z_impl_k_thread_abort()
1450 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0); in z_impl_k_thread_abort()
1452 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); in z_impl_k_thread_abort()
1456 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) in z_impl_k_thread_join() argument
1461 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout); in z_impl_k_thread_join()
1463 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_impl_k_thread_join()
1464 z_sched_switch_spin(thread); in z_impl_k_thread_join()
1468 } else if ((thread == _current) || in z_impl_k_thread_join()
1469 (thread->base.pended_on == &_current->join_queue)) { in z_impl_k_thread_join()
1473 add_to_waitq_locked(_current, &thread->join_queue); in z_impl_k_thread_join()
1476 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); in z_impl_k_thread_join()
1478 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); in z_impl_k_thread_join()
1483 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); in z_impl_k_thread_join()
1490 /* Special case: don't oops if the thread is uninitialized. This is because
1491 * the initialization bit does double-duty for thread objects; if false, it means
1492 * the thread object is truly uninitialized, or the thread ran and exited for
1498 static bool thread_obj_validate(struct k_thread *thread) in thread_obj_validate() argument
1500 struct k_object *ko = k_object_find(thread); in thread_obj_validate()
1510 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD); in thread_obj_validate()
1517 static inline int z_vrfy_k_thread_join(struct k_thread *thread, in z_vrfy_k_thread_join() argument
1520 if (thread_obj_validate(thread)) { in z_vrfy_k_thread_join()
1524 return z_impl_k_thread_join(thread, timeout); in z_vrfy_k_thread_join()
1528 static inline void z_vrfy_k_thread_abort(k_tid_t thread) in z_vrfy_k_thread_abort() argument
1530 if (thread_obj_validate(thread)) { in z_vrfy_k_thread_abort()
1534 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread), in z_vrfy_k_thread_abort()
1535 "aborting essential thread %p", thread)); in z_vrfy_k_thread_abort()
1537 z_impl_k_thread_abort((struct k_thread *)thread); in z_vrfy_k_thread_abort()
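
k_thread_abort() and k_thread_join() are the public entry points into z_thread_abort() and z_impl_k_thread_join() above. A sketch of a parent waiting briefly for a worker to terminate on its own and aborting it on timeout; names and the one-second bound are illustrative:

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(job_stack, 1024);
static struct k_thread job_thread;

static void job_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);
	/* ... a finite amount of work, then return (thread terminates) ... */
}

static void join_abort_demo(void)
{
	k_tid_t tid = k_thread_create(&job_thread, job_stack,
				      K_THREAD_STACK_SIZEOF(job_stack),
				      job_fn, NULL, NULL, NULL,
				      5, 0, K_NO_WAIT);

	/* Wait up to one second for the worker to terminate on its own. */
	if (k_thread_join(tid, K_SECONDS(1)) != 0) {
		/* Timed out: force termination (thread is flagged _THREAD_DEAD). */
		k_thread_abort(tid);
	}
}
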
1547 struct k_thread *thread; in z_sched_wake() local
1551 thread = _priq_wait_best(&wait_q->waitq); in z_sched_wake()
1553 if (thread != NULL) { in z_sched_wake()
1554 z_thread_return_value_set_with_data(thread, in z_sched_wake()
1557 unpend_thread_no_timeout(thread); in z_sched_wake()
1558 (void)z_abort_thread_timeout(thread); in z_sched_wake()
1559 ready_thread(thread); in z_sched_wake()
1581 struct k_thread *thread; in z_sched_waitq_walk() local
1585 _WAIT_Q_FOR_EACH(wait_q, thread) { in z_sched_waitq_walk()
1588 * Invoke the callback function on each waiting thread in z_sched_waitq_walk()
1593 status = func(thread, data); in z_sched_waitq_walk()