Lines matching refs:thread (references to the thread identifier in the Zephyr kernel scheduler, kernel/sched.c)
42 struct k_thread *thread);
44 struct k_thread *thread);
60 static void halt_thread(struct k_thread *thread, uint8_t new_state);
61 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
64 static inline int is_preempt(struct k_thread *thread) in is_preempt() argument
67 return thread->base.preempt <= _PREEMPT_THRESHOLD; in is_preempt()
75 static inline int is_metairq(struct k_thread *thread) in is_metairq() argument
78 return (thread->base.prio - K_HIGHEST_THREAD_PRIO) in is_metairq()
81 ARG_UNUSED(thread); in is_metairq()
87 static inline bool is_thread_dummy(struct k_thread *thread) in is_thread_dummy() argument
89 return (thread->base.thread_state & _THREAD_DUMMY) != 0U; in is_thread_dummy()
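The three predicates above (is_preempt(), is_metairq(), is_thread_dummy()) classify threads by priority band and state. A hedged sketch of how application code picks a cooperative versus a preemptible priority at thread-definition time; the entry functions and stack sizes are illustrative only:

```c
#include <zephyr/kernel.h>

static void coop_entry(void *a, void *b, void *c) { /* runs until it blocks or yields */ }
static void pree_entry(void *a, void *b, void *c) { /* may be preempted at any time   */ }

/* Negative (cooperative) priority: is_preempt() reports this thread as non-preemptible. */
K_THREAD_DEFINE(coop_tid, 1024, coop_entry, NULL, NULL, NULL, K_PRIO_COOP(2), 0, 0);

/* Non-negative (preemptible) priority: is_preempt() reports it as preemptible. */
K_THREAD_DEFINE(pree_tid, 1024, pree_entry, NULL, NULL, NULL, K_PRIO_PREEMPT(5), 0, 0);
```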
135 static ALWAYS_INLINE bool should_preempt(struct k_thread *thread, in should_preempt() argument
158 && z_is_thread_timeout_active(thread)) { in should_preempt()
165 if (is_preempt(_current) || is_metairq(thread)) { in should_preempt()
178 struct k_thread *thread; in _priq_dumb_mask_best() local
180 SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) { in _priq_dumb_mask_best()
181 if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) { in _priq_dumb_mask_best()
182 return thread; in _priq_dumb_mask_best()
191 struct k_thread *thread) in z_priq_dumb_add() argument
195 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in z_priq_dumb_add()
198 if (z_sched_prio_cmp(thread, t) > 0) { in z_priq_dumb_add()
200 &thread->base.qnode_dlist); in z_priq_dumb_add()
205 sys_dlist_append(pq, &thread->base.qnode_dlist); in z_priq_dumb_add()
209 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread) in thread_runq() argument
212 int cpu, m = thread->base.cpu_mask; in thread_runq()
224 ARG_UNUSED(thread); in thread_runq()
238 static ALWAYS_INLINE void runq_add(struct k_thread *thread) in runq_add() argument
240 _priq_run_add(thread_runq(thread), thread); in runq_add()
243 static ALWAYS_INLINE void runq_remove(struct k_thread *thread) in runq_remove() argument
245 _priq_run_remove(thread_runq(thread), thread); in runq_remove()
261 static ALWAYS_INLINE void queue_thread(struct k_thread *thread) in queue_thread() argument
263 thread->base.thread_state |= _THREAD_QUEUED; in queue_thread()
264 if (should_queue_thread(thread)) { in queue_thread()
265 runq_add(thread); in queue_thread()
268 if (thread == _current) { in queue_thread()
275 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread) in dequeue_thread() argument
277 thread->base.thread_state &= ~_THREAD_QUEUED; in dequeue_thread()
278 if (should_queue_thread(thread)) { in dequeue_thread()
279 runq_remove(thread); in dequeue_thread()
318 static inline bool is_aborting(struct k_thread *thread) in is_aborting() argument
320 return (thread->base.thread_state & _THREAD_ABORTING) != 0U; in is_aborting()
324 static inline bool is_halting(struct k_thread *thread) in is_halting() argument
326 return (thread->base.thread_state & in is_halting()
332 static inline void clear_halting(struct k_thread *thread) in clear_halting() argument
334 thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING); in clear_halting()
346 struct k_thread *thread = runq_best(); in next_up() local
357 if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) { in next_up()
359 thread = mirqp; in next_up()
373 return (thread != NULL) ? thread : _current_cpu->idle_thread; in next_up()
389 if (thread == NULL) { in next_up()
390 thread = _current_cpu->idle_thread; in next_up()
394 int32_t cmp = z_sched_prio_cmp(_current, thread); in next_up()
398 thread = _current; in next_up()
401 if (!should_preempt(thread, _current_cpu->swap_ok)) { in next_up()
402 thread = _current; in next_up()
407 if (thread != _current && active && in next_up()
413 if (z_is_thread_queued(thread)) { in next_up()
414 dequeue_thread(thread); in next_up()
418 return thread; in next_up()
422 static void move_thread_to_end_of_prio_q(struct k_thread *thread) in move_thread_to_end_of_prio_q() argument
424 if (z_is_thread_queued(thread)) { in move_thread_to_end_of_prio_q()
425 dequeue_thread(thread); in move_thread_to_end_of_prio_q()
427 queue_thread(thread); in move_thread_to_end_of_prio_q()
428 update_cache(thread == _current); in move_thread_to_end_of_prio_q()
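move_thread_to_end_of_prio_q() is the rotation applied when a thread gives up the CPU within its priority level. A minimal sketch of the most common application-side trigger, k_yield(); the polling thread is illustrative:

```c
#include <zephyr/kernel.h>

/* Illustrative polling thread: k_yield() sends the caller to the back of its
 * priority's run queue, the same rotation the scheduler performs here.
 */
static void poller(void *a, void *b, void *c)
{
	for (;;) {
		/* ... poll hardware or shared state ... */
		k_yield();
	}
}
```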
456 static inline int slice_time(struct k_thread *thread) in slice_time() argument
461 if (thread->base.slice_ticks != 0) { in slice_time()
462 ret = thread->base.slice_ticks; in slice_time()
465 ARG_UNUSED(thread); in slice_time()
470 static inline bool sliceable(struct k_thread *thread) in sliceable() argument
472 bool ret = is_preempt(thread) in sliceable()
473 && slice_time(thread) != 0 in sliceable()
474 && !z_is_prio_higher(thread->base.prio, slice_max_prio) in sliceable()
475 && !z_is_thread_prevented_from_running(thread) in sliceable()
476 && !z_is_idle_thread_object(thread); in sliceable()
479 ret |= thread->base.slice_ticks != 0; in sliceable()
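slice_time() and sliceable() gate round-robin time slicing: only preemptible, runnable, non-idle threads at or below the slice priority ceiling are sliced. A hedged sketch of enabling slicing from application code, assuming CONFIG_TIMESLICING=y:

```c
#include <zephyr/kernel.h>

void enable_round_robin(void)
{
	/* Give preemptible threads at priority 0 and below a 10 ms time slice.
	 * Cooperative threads and higher-urgency priorities stay unsliced,
	 * mirroring the is_preempt() and slice_max_prio checks in sliceable().
	 */
	k_sched_time_slice_set(10, 0);
}
```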
569 static void update_metairq_preempt(struct k_thread *thread) in update_metairq_preempt() argument
573 if (is_metairq(thread) && !is_metairq(_current) && in update_metairq_preempt()
577 } else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) { in update_metairq_preempt()
582 ARG_UNUSED(thread); in update_metairq_preempt()
589 struct k_thread *thread = next_up(); in update_cache() local
591 if (should_preempt(thread, preempt_ok)) { in update_cache()
593 if (thread != _current) { in update_cache()
594 z_reset_time_slice(thread); in update_cache()
597 update_metairq_preempt(thread); in update_cache()
598 _kernel.ready_q.cache = thread; in update_cache()
614 static bool thread_active_elsewhere(struct k_thread *thread) in thread_active_elsewhere() argument
627 (_kernel.cpus[i].current == thread)) { in thread_active_elsewhere()
632 ARG_UNUSED(thread); in thread_active_elsewhere()
636 static void ready_thread(struct k_thread *thread) in ready_thread() argument
639 __ASSERT_NO_MSG(arch_mem_coherent(thread)); in ready_thread()
645 if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) { in ready_thread()
646 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread); in ready_thread()
648 queue_thread(thread); in ready_thread()
654 void z_ready_thread(struct k_thread *thread) in z_ready_thread() argument
657 if (!thread_active_elsewhere(thread)) { in z_ready_thread()
658 ready_thread(thread); in z_ready_thread()
663 void z_move_thread_to_end_of_prio_q(struct k_thread *thread) in z_move_thread_to_end_of_prio_q() argument
666 move_thread_to_end_of_prio_q(thread); in z_move_thread_to_end_of_prio_q()
670 void z_sched_start(struct k_thread *thread) in z_sched_start() argument
674 if (z_has_thread_started(thread)) { in z_sched_start()
679 z_mark_thread_as_started(thread); in z_sched_start()
680 ready_thread(thread); in z_sched_start()
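z_sched_start() backs k_thread_start() for threads created without an automatic start. A sketch under that assumption; the worker names and stack size are illustrative:

```c
#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker;

static void worker_entry(void *a, void *b, void *c) { /* ... */ }

void spawn_later(void)
{
	/* K_FOREVER delay: the thread is created but not started until
	 * k_thread_start(), which marks it started and readies it
	 * (the z_sched_start() -> ready_thread() path above).
	 */
	k_tid_t tid = k_thread_create(&worker, worker_stack,
				      K_THREAD_STACK_SIZEOF(worker_stack),
				      worker_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(5), 0, K_FOREVER);

	k_thread_start(tid);
}
```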
700 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key, in z_thread_halt() argument
714 bool active = thread_active_elsewhere(thread); in z_thread_halt()
718 thread->base.thread_state |= (terminate ? _THREAD_ABORTING in z_thread_halt()
729 if (is_halting(thread) && (thread != _current)) { in z_thread_halt()
733 while (is_halting(thread)) { in z_thread_halt()
741 z_sched_switch_spin(thread); in z_thread_halt()
746 &thread->join_queue : in z_thread_halt()
747 &thread->halt_queue); in z_thread_halt()
753 halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED); in z_thread_halt()
754 if ((thread == _current) && !arch_is_in_isr()) { in z_thread_halt()
762 void z_impl_k_thread_suspend(struct k_thread *thread) in z_impl_k_thread_suspend() argument
764 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread); in z_impl_k_thread_suspend()
766 (void)z_abort_thread_timeout(thread); in z_impl_k_thread_suspend()
770 if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) { in z_impl_k_thread_suspend()
778 z_thread_halt(thread, key, false); in z_impl_k_thread_suspend()
780 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread); in z_impl_k_thread_suspend()
784 static inline void z_vrfy_k_thread_suspend(struct k_thread *thread) in z_vrfy_k_thread_suspend() argument
786 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_suspend()
787 z_impl_k_thread_suspend(thread); in z_vrfy_k_thread_suspend()
792 void z_impl_k_thread_resume(struct k_thread *thread) in z_impl_k_thread_resume() argument
794 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread); in z_impl_k_thread_resume()
799 if (!z_is_thread_suspended(thread)) { in z_impl_k_thread_resume()
804 z_mark_thread_as_not_suspended(thread); in z_impl_k_thread_resume()
805 ready_thread(thread); in z_impl_k_thread_resume()
809 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread); in z_impl_k_thread_resume()
813 static inline void z_vrfy_k_thread_resume(struct k_thread *thread) in z_vrfy_k_thread_resume() argument
815 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_resume()
816 z_impl_k_thread_resume(thread); in z_vrfy_k_thread_resume()
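z_impl_k_thread_suspend() and z_impl_k_thread_resume() implement the public suspend/resume pair. A minimal usage sketch; worker_tid is assumed to be a valid thread ID:

```c
#include <zephyr/kernel.h>

void pause_worker(k_tid_t worker_tid)
{
	k_thread_suspend(worker_tid);  /* halts the thread via z_thread_halt(..., false) */
	/* ... run work that the worker must not interfere with ... */
	k_thread_resume(worker_tid);   /* clears _THREAD_SUSPENDED and re-readies it */
}
```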
821 static _wait_q_t *pended_on_thread(struct k_thread *thread) in pended_on_thread() argument
823 __ASSERT_NO_MSG(thread->base.pended_on); in pended_on_thread()
825 return thread->base.pended_on; in pended_on_thread()
828 static void unready_thread(struct k_thread *thread) in unready_thread() argument
830 if (z_is_thread_queued(thread)) { in unready_thread()
831 dequeue_thread(thread); in unready_thread()
833 update_cache(thread == _current); in unready_thread()
837 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q) in add_to_waitq_locked() argument
839 unready_thread(thread); in add_to_waitq_locked()
840 z_mark_thread_as_pending(thread); in add_to_waitq_locked()
842 SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread); in add_to_waitq_locked()
845 thread->base.pended_on = wait_q; in add_to_waitq_locked()
846 z_priq_wait_add(&wait_q->waitq, thread); in add_to_waitq_locked()
850 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout) in add_thread_timeout() argument
853 z_add_thread_timeout(thread, timeout); in add_thread_timeout()
857 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q, in pend_locked() argument
863 add_to_waitq_locked(thread, wait_q); in pend_locked()
864 add_thread_timeout(thread, timeout); in pend_locked()
867 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, in z_pend_thread() argument
870 __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread)); in z_pend_thread()
872 pend_locked(thread, wait_q, timeout); in z_pend_thread()
876 static inline void unpend_thread_no_timeout(struct k_thread *thread) in unpend_thread_no_timeout() argument
878 _priq_wait_remove(&pended_on_thread(thread)->waitq, thread); in unpend_thread_no_timeout()
879 z_mark_thread_as_not_pending(thread); in unpend_thread_no_timeout()
880 thread->base.pended_on = NULL; in unpend_thread_no_timeout()
883 ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread) in z_unpend_thread_no_timeout() argument
886 if (thread->base.pended_on != NULL) { in z_unpend_thread_no_timeout()
887 unpend_thread_no_timeout(thread); in z_unpend_thread_no_timeout()
892 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout) in z_sched_wake_thread() argument
895 bool killed = (thread->base.thread_state & in z_sched_wake_thread()
899 bool do_nothing = thread->no_wake_on_timeout && is_timeout; in z_sched_wake_thread()
901 thread->no_wake_on_timeout = false; in z_sched_wake_thread()
910 if (thread->base.pended_on != NULL) { in z_sched_wake_thread()
911 unpend_thread_no_timeout(thread); in z_sched_wake_thread()
913 z_mark_thread_as_started(thread); in z_sched_wake_thread()
915 z_mark_thread_as_not_suspended(thread); in z_sched_wake_thread()
917 ready_thread(thread); in z_sched_wake_thread()
927 struct k_thread *thread = CONTAINER_OF(timeout, in z_thread_timeout() local
930 z_sched_wake_thread(thread, true); in z_thread_timeout()
982 struct k_thread *thread = NULL; in z_unpend1_no_timeout() local
985 thread = _priq_wait_best(&wait_q->waitq); in z_unpend1_no_timeout()
987 if (thread != NULL) { in z_unpend1_no_timeout()
988 unpend_thread_no_timeout(thread); in z_unpend1_no_timeout()
992 return thread; in z_unpend1_no_timeout()
997 struct k_thread *thread = NULL; in z_unpend_first_thread() local
1000 thread = _priq_wait_best(&wait_q->waitq); in z_unpend_first_thread()
1002 if (thread != NULL) { in z_unpend_first_thread()
1003 unpend_thread_no_timeout(thread); in z_unpend_first_thread()
1004 (void)z_abort_thread_timeout(thread); in z_unpend_first_thread()
1008 return thread; in z_unpend_first_thread()
1011 void z_unpend_thread(struct k_thread *thread) in z_unpend_thread() argument
1013 z_unpend_thread_no_timeout(thread); in z_unpend_thread()
1014 (void)z_abort_thread_timeout(thread); in z_unpend_thread()
1020 bool z_set_prio(struct k_thread *thread, int prio) in z_set_prio() argument
1025 need_sched = z_is_thread_ready(thread); in z_set_prio()
1029 if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) { in z_set_prio()
1030 dequeue_thread(thread); in z_set_prio()
1031 thread->base.prio = prio; in z_set_prio()
1032 queue_thread(thread); in z_set_prio()
1034 thread->base.prio = prio; in z_set_prio()
1038 thread->base.prio = prio; in z_set_prio()
1042 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio); in z_set_prio()
1047 void z_thread_priority_set(struct k_thread *thread, int prio) in z_thread_priority_set() argument
1049 bool need_sched = z_set_prio(thread, prio); in z_thread_priority_set()
1255 void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread) in z_priq_dumb_remove() argument
1259 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in z_priq_dumb_remove()
1261 sys_dlist_remove(&thread->base.qnode_dlist); in z_priq_dumb_remove()
1266 struct k_thread *thread = NULL; in z_priq_dumb_best() local
1270 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); in z_priq_dumb_best()
1272 return thread; in z_priq_dumb_best()
1295 void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread) in z_priq_rb_add() argument
1299 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in z_priq_rb_add()
1301 thread->base.order_key = pq->next_order_key++; in z_priq_rb_add()
1315 rb_insert(&pq->tree, &thread->base.qnode_rb); in z_priq_rb_add()
1318 void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread) in z_priq_rb_remove() argument
1320 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); in z_priq_rb_remove()
1322 rb_remove(&pq->tree, &thread->base.qnode_rb); in z_priq_rb_remove()
1331 struct k_thread *thread = NULL; in z_priq_rb_best() local
1335 thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb); in z_priq_rb_best()
1337 return thread; in z_priq_rb_best()
1346 struct k_thread *thread) in z_priq_mq_add() argument
1348 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; in z_priq_mq_add()
1350 sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist); in z_priq_mq_add()
1355 struct k_thread *thread) in z_priq_mq_remove() argument
1357 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; in z_priq_mq_remove()
1359 sys_dlist_remove(&thread->base.qnode_dlist); in z_priq_mq_remove()
1372 struct k_thread *thread = NULL; in z_priq_mq_best() local
1377 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); in z_priq_mq_best()
1379 return thread; in z_priq_mq_best()
1385 struct k_thread *thread; in z_unpend_all() local
1387 while ((thread = z_waitq_head(wait_q)) != NULL) { in z_unpend_all()
1388 z_unpend_thread(thread); in z_unpend_all()
1389 z_ready_thread(thread); in z_unpend_all()
1424 int z_impl_k_thread_priority_get(k_tid_t thread) in z_impl_k_thread_priority_get() argument
1426 return thread->base.prio; in z_impl_k_thread_priority_get()
1430 static inline int z_vrfy_k_thread_priority_get(k_tid_t thread) in z_vrfy_k_thread_priority_get() argument
1432 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_priority_get()
1433 return z_impl_k_thread_priority_get(thread); in z_vrfy_k_thread_priority_get()
1438 void z_impl_k_thread_priority_set(k_tid_t thread, int prio) in z_impl_k_thread_priority_set() argument
1447 struct k_thread *th = (struct k_thread *)thread; in z_impl_k_thread_priority_set()
1453 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) in z_vrfy_k_thread_priority_set() argument
1455 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_priority_set()
1458 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, in z_vrfy_k_thread_priority_set()
1460 prio, thread->base.prio)); in z_vrfy_k_thread_priority_set()
1462 z_impl_k_thread_priority_set(thread, prio); in z_vrfy_k_thread_priority_set()
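z_set_prio() and z_thread_priority_set() requeue a ready thread when its priority changes, and the syscall verifier above only lets user mode lower (numerically raise) a priority. A sketch of kernel-mode use; tid is assumed valid:

```c
#include <zephyr/kernel.h>

void boost_then_restore(k_tid_t tid)
{
	int old = k_thread_priority_get(tid);

	k_thread_priority_set(tid, old - 1);  /* more urgent for a short window */
	/* ... latency-sensitive phase ... */
	k_thread_priority_set(tid, old);      /* back to the original priority */
}
```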
1470 struct k_thread *thread = tid; in z_impl_k_thread_deadline_set() local
1473 thread->base.prio_deadline = k_cycle_get_32() + deadline; in z_impl_k_thread_deadline_set()
1474 if (z_is_thread_queued(thread)) { in z_impl_k_thread_deadline_set()
1475 dequeue_thread(thread); in z_impl_k_thread_deadline_set()
1476 queue_thread(thread); in z_impl_k_thread_deadline_set()
1484 struct k_thread *thread = tid; in z_vrfy_k_thread_deadline_set() local
1486 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_thread_deadline_set()
1491 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); in z_vrfy_k_thread_deadline_set()
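k_thread_deadline_set() records a relative deadline for earliest-deadline-first ordering among threads of equal static priority. A hedged sketch, assuming CONFIG_SCHED_DEADLINE=y; the 5 ms figure is illustrative:

```c
#include <zephyr/kernel.h>

void start_cycle(k_tid_t tid)
{
	/* The deadline is relative and measured in k_cycle_get_32() cycles,
	 * matching the prio_deadline computation above: roughly 5 ms from now.
	 */
	k_thread_deadline_set(tid, (int)k_ms_to_cyc_ceil32(5));
}
```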
1641 void z_impl_k_wakeup(k_tid_t thread) in z_impl_k_wakeup() argument
1643 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread); in z_impl_k_wakeup()
1645 if (z_is_thread_pending(thread)) { in z_impl_k_wakeup()
1649 if (z_abort_thread_timeout(thread) < 0) { in z_impl_k_wakeup()
1651 if (thread->base.thread_state != _THREAD_SUSPENDED) { in z_impl_k_wakeup()
1656 z_mark_thread_as_not_suspended(thread); in z_impl_k_wakeup()
1657 z_ready_thread(thread); in z_impl_k_wakeup()
1689 static inline void z_vrfy_k_wakeup(k_tid_t thread) in z_vrfy_k_wakeup() argument
1691 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); in z_vrfy_k_wakeup()
1692 z_impl_k_wakeup(thread); in z_vrfy_k_wakeup()
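z_impl_k_wakeup() readies a thread that is sleeping rather than pended on a wait queue. A minimal sketch of the sleeping side; dozer is illustrative and would be woken with k_wakeup(dozer_tid) from elsewhere:

```c
#include <zephyr/kernel.h>

static void dozer(void *a, void *b, void *c)
{
	for (;;) {
		int32_t left_ms = k_sleep(K_SECONDS(10));

		if (left_ms > 0) {
			/* woken early by k_wakeup(); left_ms is the unslept remainder */
		}
	}
}
```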
1743 static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask) in cpu_mask_mod() argument
1748 __ASSERT(z_is_thread_prevented_from_running(thread), in cpu_mask_mod()
1753 if (z_is_thread_prevented_from_running(thread)) { in cpu_mask_mod()
1754 thread->base.cpu_mask |= enable_mask; in cpu_mask_mod()
1755 thread->base.cpu_mask &= ~disable_mask; in cpu_mask_mod()
1762 int m = thread->base.cpu_mask; in cpu_mask_mod()
1771 int k_thread_cpu_mask_clear(k_tid_t thread) in k_thread_cpu_mask_clear() argument
1773 return cpu_mask_mod(thread, 0, 0xffffffff); in k_thread_cpu_mask_clear()
1776 int k_thread_cpu_mask_enable_all(k_tid_t thread) in k_thread_cpu_mask_enable_all() argument
1778 return cpu_mask_mod(thread, 0xffffffff, 0); in k_thread_cpu_mask_enable_all()
1781 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu) in k_thread_cpu_mask_enable() argument
1783 return cpu_mask_mod(thread, BIT(cpu), 0); in k_thread_cpu_mask_enable()
1786 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu) in k_thread_cpu_mask_disable() argument
1788 return cpu_mask_mod(thread, 0, BIT(cpu)); in k_thread_cpu_mask_disable()
1791 int k_thread_cpu_pin(k_tid_t thread, int cpu) in k_thread_cpu_pin() argument
1795 ret = k_thread_cpu_mask_clear(thread); in k_thread_cpu_pin()
1797 return k_thread_cpu_mask_enable(thread, cpu); in k_thread_cpu_pin()
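cpu_mask_mod() asserts that the thread is not runnable while its CPU affinity changes, which is why the mask APIs are normally used before k_thread_start(). A hedged sketch, assuming CONFIG_SCHED_CPU_MASK=y on an SMP target:

```c
#include <zephyr/kernel.h>

int pin_worker_to_cpu1(k_tid_t tid)
{
	/* tid is assumed to have been created with a K_FOREVER delay,
	 * so it is not yet runnable when its mask is modified.
	 */
	int ret = k_thread_cpu_pin(tid, 1);  /* clear the mask, then enable only CPU 1 */

	if (ret == 0) {
		k_thread_start(tid);
	}
	return ret;
}
```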
1806 struct k_thread *thread; in unpend_all() local
1808 while ((thread = z_waitq_head(wait_q)) != NULL) { in unpend_all()
1809 unpend_thread_no_timeout(thread); in unpend_all()
1810 (void)z_abort_thread_timeout(thread); in unpend_all()
1811 arch_thread_return_value_set(thread, 0); in unpend_all()
1812 ready_thread(thread); in unpend_all()
1817 extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
1828 static void halt_thread(struct k_thread *thread, uint8_t new_state) in halt_thread() argument
1833 if ((thread->base.thread_state & new_state) == 0U) { in halt_thread()
1834 thread->base.thread_state |= new_state; in halt_thread()
1835 clear_halting(thread); in halt_thread()
1836 if (z_is_thread_queued(thread)) { in halt_thread()
1837 dequeue_thread(thread); in halt_thread()
1841 if (thread->base.pended_on != NULL) { in halt_thread()
1842 unpend_thread_no_timeout(thread); in halt_thread()
1844 (void)z_abort_thread_timeout(thread); in halt_thread()
1845 unpend_all(&thread->join_queue); in halt_thread()
1848 unpend_all(&thread->halt_queue); in halt_thread()
1857 arch_float_disable(thread); in halt_thread()
1860 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); in halt_thread()
1862 z_thread_monitor_exit(thread); in halt_thread()
1865 z_thread_cmsis_status_mask_clear(thread); in halt_thread()
1870 k_obj_core_stats_deregister(K_OBJ_CORE(thread)); in halt_thread()
1872 k_obj_core_unlink(K_OBJ_CORE(thread)); in halt_thread()
1876 z_mem_domain_exit_thread(thread); in halt_thread()
1877 k_thread_perms_all_clear(thread); in halt_thread()
1878 k_object_uninit(thread->stack_obj); in halt_thread()
1879 k_object_uninit(thread); in halt_thread()
1884 void z_thread_abort(struct k_thread *thread) in z_thread_abort() argument
1888 if ((thread->base.user_options & K_ESSENTIAL) != 0) { in z_thread_abort()
1890 __ASSERT(false, "aborting essential thread %p", thread); in z_thread_abort()
1895 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_thread_abort()
1900 z_thread_halt(thread, key, true); in z_thread_abort()
1904 void z_impl_k_thread_abort(struct k_thread *thread) in z_impl_k_thread_abort() argument
1906 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); in z_impl_k_thread_abort()
1908 z_thread_abort(thread); in z_impl_k_thread_abort()
1910 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); in z_impl_k_thread_abort()
1914 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) in z_impl_k_thread_join() argument
1919 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout); in z_impl_k_thread_join()
1921 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_impl_k_thread_join()
1922 z_sched_switch_spin(thread); in z_impl_k_thread_join()
1926 } else if ((thread == _current) || in z_impl_k_thread_join()
1927 (thread->base.pended_on == &_current->join_queue)) { in z_impl_k_thread_join()
1931 add_to_waitq_locked(_current, &thread->join_queue); in z_impl_k_thread_join()
1934 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); in z_impl_k_thread_join()
1936 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); in z_impl_k_thread_join()
1941 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); in z_impl_k_thread_join()
1956 static bool thread_obj_validate(struct k_thread *thread) in thread_obj_validate() argument
1958 struct k_object *ko = k_object_find(thread); in thread_obj_validate()
1968 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD); in thread_obj_validate()
1975 static inline int z_vrfy_k_thread_join(struct k_thread *thread, in z_vrfy_k_thread_join() argument
1978 if (thread_obj_validate(thread)) { in z_vrfy_k_thread_join()
1982 return z_impl_k_thread_join(thread, timeout); in z_vrfy_k_thread_join()
1986 static inline void z_vrfy_k_thread_abort(k_tid_t thread) in z_vrfy_k_thread_abort() argument
1988 if (thread_obj_validate(thread)) { in z_vrfy_k_thread_abort()
1992 K_OOPS(K_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL), in z_vrfy_k_thread_abort()
1993 "aborting essential thread %p", thread)); in z_vrfy_k_thread_abort()
1995 z_impl_k_thread_abort((struct k_thread *)thread); in z_vrfy_k_thread_abort()
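z_impl_k_thread_join() and z_impl_k_thread_abort() are the kernel side of the public join/abort calls. A minimal sketch of the usual shutdown pattern; the one-second timeout is illustrative:

```c
#include <zephyr/kernel.h>

void stop_worker(struct k_thread *worker)
{
	/* Wait briefly for a clean exit, then force termination. */
	if (k_thread_join(worker, K_SECONDS(1)) != 0) {
		k_thread_abort(worker);
		(void)k_thread_join(worker, K_FOREVER);  /* returns once the thread is dead */
	}
}
```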
2005 struct k_thread *thread; in z_sched_wake() local
2009 thread = _priq_wait_best(&wait_q->waitq); in z_sched_wake()
2011 if (thread != NULL) { in z_sched_wake()
2012 z_thread_return_value_set_with_data(thread, in z_sched_wake()
2015 unpend_thread_no_timeout(thread); in z_sched_wake()
2016 (void)z_abort_thread_timeout(thread); in z_sched_wake()
2017 ready_thread(thread); in z_sched_wake()
2039 struct k_thread *thread; in z_sched_waitq_walk() local
2043 _WAIT_Q_FOR_EACH(wait_q, thread) { in z_sched_waitq_walk()
2051 status = func(thread, data); in z_sched_waitq_walk()