Lines matching "local-ipi-id"
4 * SPDX-License-Identifier: Apache-2.0
13 #include <ipi.h>
50 * > 0 -> thread 1 priority > thread 2 priority
51 * = 0 -> thread 1 priority == thread 2 priority
52 * < 0 -> thread 1 priority < thread 2 priority
60 int32_t b1 = thread_1->base.prio; in z_sched_prio_cmp()
61 int32_t b2 = thread_2->base.prio; in z_sched_prio_cmp()
64 return b2 - b1; in z_sched_prio_cmp()
71 * guaranteed to be (2's complement) non-negative. We can in z_sched_prio_cmp()
75 uint32_t d1 = thread_1->base.prio_deadline; in z_sched_prio_cmp()
76 uint32_t d2 = thread_2->base.prio_deadline; in z_sched_prio_cmp()
84 return (int32_t) (d2 - d1); in z_sched_prio_cmp()
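
The z_sched_prio_cmp() matches above compare static priorities first (a numerically lower prio is the higher priority, hence b2 - b1) and only then fall back to EDF deadlines, where the unsigned subtraction cast to int32_t stays correct across tick-counter wraparound. A minimal standalone sketch of that wraparound-safe idiom, with illustrative names rather than the kernel's own:

#include <stdint.h>
#include <stdio.h>

/* Compare two wrapping 32-bit deadlines.  The unsigned difference cast to
 * signed is positive when d1 expires before d2, and it stays correct across
 * wraparound as long as the two deadlines are less than 2^31 ticks apart.
 */
static int32_t deadline_cmp(uint32_t d1, uint32_t d2)
{
	return (int32_t)(d2 - d1);
}

int main(void)
{
	uint32_t near_wrap = 0xFFFFFFF0u; /* expires just before the counter wraps */
	uint32_t past_wrap = 0x00000010u; /* expires just after the wrap */

	/* near_wrap is the earlier deadline even though it is numerically larger */
	printf("%d\n", deadline_cmp(near_wrap, past_wrap) > 0); /* prints 1 */
	return 0;
}
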
93 int cpu, m = thread->base.cpu_mask; in thread_runq()
113 return &arch_curr_cpu()->ready_q.runq; in curr_cpu_runq()
148 thread->base.thread_state |= _THREAD_QUEUED; in queue_thread()
155 _current_cpu->swap_ok = true; in queue_thread()
162 thread->base.thread_state &= ~_THREAD_QUEUED; in dequeue_thread()
185 return (thread->base.thread_state & _THREAD_ABORTING) != 0U; in is_aborting()
191 return (thread->base.thread_state & in is_halting()
199 thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING); in clear_halting()
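
queue_thread(), dequeue_thread() and the halting helpers above all drive the same thread_state bit field. A small sketch of that set/clear/test pattern, using the flag names visible in the matches but with made-up bit values:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit assignments only; the real values live in the kernel headers. */
#define _THREAD_QUEUED     (1U << 0)
#define _THREAD_ABORTING   (1U << 1)
#define _THREAD_SUSPENDING (1U << 2)

struct base { uint8_t thread_state; };

static void queue_thread(struct base *b)   { b->thread_state |= _THREAD_QUEUED; }
static void dequeue_thread(struct base *b) { b->thread_state &= ~_THREAD_QUEUED; }

/* "Halting" covers both an abort and a suspend request in flight. */
static bool is_halting(const struct base *b)
{
	return (b->thread_state & (_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}

static void clear_halting(struct base *b)
{
	b->thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}
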
218 * promised it wouldn't be preempted (by non-metairq threads)! in next_up()
220 struct k_thread *mirqp = _current_cpu->metairq_preempted; in next_up()
226 _current_cpu->metairq_preempted = NULL; in next_up()
241 return (thread != NULL) ? thread : _current_cpu->idle_thread; in next_up()
252 * queue such that we don't want to re-add it". in next_up()
258 thread = _current_cpu->idle_thread; in next_up()
265 if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) { in next_up()
269 if (!should_preempt(thread, _current_cpu->swap_ok)) { in next_up()
285 _current_cpu->swap_ok = false; in next_up()
310 _current_cpu->metairq_preempted = arch_current_thread(); in update_metairq_preempt()
313 _current_cpu->metairq_preempted = NULL; in update_metairq_preempt()
347 _current_cpu->swap_ok = preempt_ok; in update_cache()
358 int currcpu = _current_cpu->id; in thread_active_elsewhere()
409 * another CPU to catch the IPI we sent and halt. Note that we check
438 _wait_q_t *wq = &thread->join_queue; in z_thread_halt()
440 wq = terminate ? wq : &thread->halt_queue; in z_thread_halt()
445 * synchronous IPI is needed here, not deferred!), it will in z_thread_halt()
446 * halt itself in the IPI. Otherwise it's unscheduled, so we in z_thread_halt()
453 thread->base.thread_state |= (terminate ? _THREAD_ABORTING in z_thread_halt()
457 arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id)); in z_thread_halt()
479 * re-take the lock! in z_thread_halt()
503 if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) { in z_impl_k_thread_suspend()
571 thread->base.pended_on = wait_q; in add_to_waitq_locked()
572 _priq_wait_add(&wait_q->waitq, thread); in add_to_waitq_locked()
605 if (thread->base.pended_on != NULL) { in z_unpend_thread_no_timeout()
614 bool killed = (thread->base.thread_state & in z_sched_wake_thread()
618 bool do_nothing = thread->no_wake_on_timeout && is_timeout; in z_sched_wake_thread()
620 thread->no_wake_on_timeout = false; in z_sched_wake_thread()
629 if (thread->base.pended_on != NULL) { in z_sched_wake_thread()
660 * that we hold the scheduler lock and leave local interrupts in z_pend_curr()
677 thread = _priq_wait_best(&wait_q->waitq); in z_unpend1_no_timeout()
699 int old_prio = thread->base.prio; in z_thread_prio_set()
707 thread->base.prio = prio; in z_thread_prio_set()
716 * priority, but do not requeue it. An IPI is in z_thread_prio_set()
721 thread->base.prio = prio; in z_thread_prio_set()
727 flag_ipi(IPI_CPU_MASK(cpu->id)); in z_thread_prio_set()
733 thread->base.prio = prio; in z_thread_prio_set()
745 _current_cpu->swap_ok = 0; in resched()
801 __ASSERT(arch_current_thread()->base.sched_locked != 0U, ""); in k_sched_unlock()
804 ++arch_current_thread()->base.sched_locked; in k_sched_unlock()
809 arch_current_thread(), arch_current_thread()->base.sched_locked); in k_sched_unlock()
846 * of a non-recursed interrupt. This function determines which thread to
849 * - The handle for the interrupted thread in which case the thread's context
852 * - NULL if more work is required to fully save the thread's state after
866 * to the interrupted thread anymore. It might be necessary to make a local
884 old_thread->switch_handle = NULL; in z_get_next_switch_handle()
897 _current_cpu->swap_ok = 0; in z_get_next_switch_handle()
898 cpu_id = arch_curr_cpu()->id; in z_get_next_switch_handle()
899 new_thread->base.cpu = cpu_id; in z_get_next_switch_handle()
923 if ((new_thread->base.cpu_mask != -1) && in z_get_next_switch_handle()
924 (old_thread->base.cpu_mask != BIT(cpu_id))) { in z_get_next_switch_handle()
931 old_thread->switch_handle = interrupted; in z_get_next_switch_handle()
932 ret = new_thread->switch_handle; in z_get_next_switch_handle()
935 new_thread->switch_handle = NULL; in z_get_next_switch_handle()
942 arch_current_thread()->switch_handle = interrupted; in z_get_next_switch_handle()
944 return arch_current_thread()->switch_handle; in z_get_next_switch_handle()
965 _priq_run_init(&ready_q->runq); in init_ready_q()
990 (arch_current_thread()->base.sched_locked == 0U))) { in z_impl_k_thread_priority_set()
1002 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, in z_vrfy_k_thread_priority_set()
1004 prio, thread->base.prio)); in z_vrfy_k_thread_priority_set()
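
The z_vrfy_k_thread_priority_set() check above means a user-mode caller may only lower a thread's priority (set a numerically greater-or-equal value). A hedged usage sketch of the public API from application code:

#include <zephyr/kernel.h>

static void drop_own_priority(void)
{
	int prio = k_thread_priority_get(k_current_get());

	/* Lowering priority (a larger numeric value) passes the
	 * K_SYSCALL_VERIFY_MSG() check shown above; raising it from
	 * user mode would not.
	 */
	k_thread_priority_set(k_current_get(), prio + 1);
}
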
1029 thread->base.prio_deadline = newdl; in z_impl_k_thread_deadline_set()
1032 thread->base.prio_deadline = newdl; in z_impl_k_thread_deadline_set()
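
The prio_deadline assignments belong to the EDF path (CONFIG_SCHED_DEADLINE): the public wrapper takes a deadline relative to now, and the scheduler stores the absolute value that z_sched_prio_cmp() compares. A minimal usage sketch, with an illustrative constant:

#include <zephyr/kernel.h>

#define RELATIVE_DEADLINE 1000 /* illustrative; see the k_thread_deadline_set() docs for units */

static void edf_cycle(void)
{
	/* Re-arm this thread's deadline before each work cycle; threads of
	 * equal static priority are then ordered by earliest deadline.
	 */
	k_thread_deadline_set(k_current_get(), RELATIVE_DEADLINE);

	/* ... periodic work ... */
}
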
1135 uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32(); in z_tick_sleep()
1137 /* Cast to signed 32-bit so an already-negative (expired) value is handled correctly */ in z_tick_sleep()
1275 if ((thread->base.thread_state & new_state) == 0U) { in halt_thread()
1276 thread->base.thread_state |= new_state; in halt_thread()
1282 if (thread->base.pended_on != NULL) { in halt_thread()
1286 unpend_all(&thread->join_queue); in halt_thread()
1291 * switch doesn't clobber the now-freed in halt_thread()
1299 unpend_all(&thread->halt_queue); in halt_thread()
1329 k_object_uninit(thread->stack_obj); in halt_thread()
1341 * code. Note that we must leave a non-null switch in halt_thread()
1348 arch_current_thread()->switch_handle = arch_current_thread(); in halt_thread()
1373 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_thread_abort()
1388 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0); in z_impl_k_thread_abort()
1401 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { in z_impl_k_thread_join()
1405 ret = -EBUSY; in z_impl_k_thread_join()
1407 (thread->base.pended_on == &arch_current_thread()->join_queue)) { in z_impl_k_thread_join()
1408 ret = -EDEADLK; in z_impl_k_thread_join()
1411 add_to_waitq_locked(arch_current_thread(), &thread->join_queue); in z_impl_k_thread_join()
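
The k_thread_join() matches above show its two error paths: -EBUSY when the caller asked not to wait and the thread is still live, and -EDEADLK for a self-join or two threads joining each other. A hedged usage sketch:

#include <zephyr/kernel.h>

static int wait_for_worker(struct k_thread *worker)
{
	/* Try a non-blocking join first; -EBUSY means the worker is still running. */
	int ret = k_thread_join(worker, K_NO_WAIT);

	if (ret == -EBUSY) {
		/* Block for up to one second; -EAGAIN would mean it still hasn't exited. */
		ret = k_thread_join(worker, K_SECONDS(1));
	}
	return ret;
}
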
1429 * the initialization bit does double-duty for thread objects; if false, means
1444 case -EINVAL: in thread_obj_validate()
1489 thread = _priq_wait_best(&wait_q->waitq); in z_sched_wake()
1511 *data = arch_current_thread()->base.swap_data; in z_sched_wait()