1 /*
2 * Copyright (c) 2018 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <wait_q.h>
10 #include <kthread.h>
11 #include <priority_q.h>
12 #include <kswap.h>
13 #include <ipi.h>
14 #include <kernel_arch_func.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/drivers/timer/system_timer.h>
17 #include <stdbool.h>
18 #include <kernel_internal.h>
19 #include <zephyr/logging/log.h>
20 #include <zephyr/sys/atomic.h>
21 #include <zephyr/sys/math_extras.h>
22 #include <zephyr/timing/timing.h>
23 #include <zephyr/sys/util.h>
24
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26
27 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
28 extern struct k_thread *pending_current;
29 #endif
30
31 struct k_spinlock _sched_spinlock;
32
33 /* Storage to "complete" the context switch from an invalid/incomplete thread
34 * context (ex: exiting an ISR that aborted _current)
35 */
36 __incoherent struct k_thread _thread_dummy;
37
38 static void update_cache(int preempt_ok);
39 static void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
41
42
43 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
44 "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
45 "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
46 "threads.");
47
48 /*
49 * Return value same as e.g. memcmp
50 * > 0 -> thread 1 priority > thread 2 priority
51 * = 0 -> thread 1 priority == thread 2 priority
52 * < 0 -> thread 1 priority < thread 2 priority
53 * Do not rely on the actual value returned aside from the above.
54 * (Again, like memcmp.)
55 */
56 int32_t z_sched_prio_cmp(struct k_thread *thread_1,
57 struct k_thread *thread_2)
58 {
59 /* `prio` is <32b, so the below cannot overflow. */
60 int32_t b1 = thread_1->base.prio;
61 int32_t b2 = thread_2->base.prio;
62
63 if (b1 != b2) {
64 return b2 - b1;
65 }
66
67 #ifdef CONFIG_SCHED_DEADLINE
68 /* If we assume all deadlines live within the same "half" of
69 * the 32 bit modulus space (this is a documented API rule),
70 * then the latest deadline in the queue minus the earliest is
71 * guaranteed to be (2's complement) non-negative. We can
72 * leverage that to compare the values without having to check
73 * the current time.
74 */
75 uint32_t d1 = thread_1->base.prio_deadline;
76 uint32_t d2 = thread_2->base.prio_deadline;
77
78 if (d1 != d2) {
79 /* Sooner deadline means higher effective priority.
80 * Doing the calculation with unsigned types and casting
81 * to signed isn't perfect, but at least reduces this
82 * from UB on overflow to impdef.
83 */
84 return (int32_t) (d2 - d1);
85 }
86 #endif /* CONFIG_SCHED_DEADLINE */
87 return 0;
88 }
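/* Illustrative sketch (not part of the scheduler): a worked example of
 * the wrapping deadline comparison above, assuming both deadlines live
 * in the same half of the 32 bit modulus space, as the API requires.
 *
 *	uint32_t d1 = 0xFFFFFFF0U;   (thread_1's deadline, just before wrap)
 *	uint32_t d2 = 0x00000010U;   (thread_2's deadline, just after wrap)
 *
 *	(int32_t)(d2 - d1) == 0x20 > 0, so thread_1 is reported as having
 *	the higher effective priority: its deadline is sooner even though
 *	d1 > d2 numerically.
 */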
89
90 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
91 {
92 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
93 int cpu, m = thread->base.cpu_mask;
94
95 /* Edge case: it's legal per the API to "make runnable" a
96 * thread with all CPUs masked off (i.e. one that isn't
97 * actually runnable!). Sort of a wart in the API and maybe
98 * we should address this in docs/assertions instead to avoid
99 * the extra test.
100 */
101 cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
102
103 return &_kernel.cpus[cpu].ready_q.runq;
104 #else
105 ARG_UNUSED(thread);
106 return &_kernel.ready_q.runq;
107 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
108 }
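/* Illustrative note (assumption: CONFIG_SCHED_CPU_MASK_PIN_ONLY pins
 * each thread to a single CPU): the lowest set bit of cpu_mask selects
 * the per-CPU run queue above.  For example, with a hypothetical mask:
 *
 *	cpu_mask == BIT(2)  ->  u32_count_trailing_zeros(0x4) == 2
 *	                    ->  &_kernel.cpus[2].ready_q.runq
 */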
109
110 static ALWAYS_INLINE void *curr_cpu_runq(void)
111 {
112 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
113 return &arch_curr_cpu()->ready_q.runq;
114 #else
115 return &_kernel.ready_q.runq;
116 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
117 }
118
119 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
120 {
121 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
122
123 _priq_run_add(thread_runq(thread), thread);
124 }
125
126 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
127 {
128 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
129
130 _priq_run_remove(thread_runq(thread), thread);
131 }
132
133 static ALWAYS_INLINE struct k_thread *runq_best(void)
134 {
135 return _priq_run_best(curr_cpu_runq());
136 }
137
138 /* _current is never in the run queue until context switch on
139 * SMP configurations, see z_requeue_current()
140 */
141 static inline bool should_queue_thread(struct k_thread *thread)
142 {
143 return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
144 }
145
146 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
147 {
148 thread->base.thread_state |= _THREAD_QUEUED;
149 if (should_queue_thread(thread)) {
150 runq_add(thread);
151 }
152 #ifdef CONFIG_SMP
153 if (thread == _current) {
154 /* add current to end of queue means "yield" */
155 _current_cpu->swap_ok = true;
156 }
157 #endif /* CONFIG_SMP */
158 }
159
160 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
161 {
162 thread->base.thread_state &= ~_THREAD_QUEUED;
163 if (should_queue_thread(thread)) {
164 runq_remove(thread);
165 }
166 }
167
168 /* Called out of z_swap() when CONFIG_SMP.  On SMP the current thread
169 * must never live in the run queue until we are inexorably on the
170 * context switch path; otherwise there is a deadlock condition where
171 * a set of CPUs pick a cycle of threads to run and wait forever for
172 * them all to context switch.
173 */
174 void z_requeue_current(struct k_thread *thread)
175 {
176 if (z_is_thread_queued(thread)) {
177 runq_add(thread);
178 }
179 signal_pending_ipi();
180 }
181
182 /* Return true if the thread is aborting, else false */
183 static inline bool is_aborting(struct k_thread *thread)
184 {
185 return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
186 }
187
188 /* Return true if the thread is aborting or suspending, else false */
189 static inline bool is_halting(struct k_thread *thread)
190 {
191 return (thread->base.thread_state &
192 (_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
193 }
194
195 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
196 static inline void clear_halting(struct k_thread *thread)
197 {
198 barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
199 thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
200 }
201
202 static ALWAYS_INLINE struct k_thread *next_up(void)
203 {
204 #ifdef CONFIG_SMP
205 if (is_halting(_current)) {
206 halt_thread(_current, is_aborting(_current) ?
207 _THREAD_DEAD : _THREAD_SUSPENDED);
208 }
209 #endif /* CONFIG_SMP */
210
211 struct k_thread *thread = runq_best();
212
213 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
214 (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
215 /* MetaIRQs must always attempt to return back to a
216 * cooperative thread they preempted and not whatever happens
217 * to be highest priority now. The cooperative thread was
218 * promised it wouldn't be preempted (by non-metairq threads)!
219 */
220 struct k_thread *mirqp = _current_cpu->metairq_preempted;
221
222 if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
223 if (!z_is_thread_prevented_from_running(mirqp)) {
224 thread = mirqp;
225 } else {
226 _current_cpu->metairq_preempted = NULL;
227 }
228 }
229 #endif
230 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
231 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
232 */
233
234 #ifndef CONFIG_SMP
235 /* In uniprocessor mode, we can leave the current thread in
236 * the queue (actually we have to, otherwise the assembly
237 * context switch code for all architectures would be
238 * responsible for putting it back in z_swap and ISR return!),
239 * which makes this choice simple.
240 */
241 return (thread != NULL) ? thread : _current_cpu->idle_thread;
242 #else
243 /* Under SMP, the "cache" mechanism for selecting the next
244 * thread doesn't work, so we have more work to do to test
245 * _current against the best choice from the queue. Here, the
246 * thread selected above represents "the best thread that is
247 * not current".
248 *
249 * Subtle note on "queued": in SMP mode, _current does not
250 * live in the queue, so this isn't exactly the same thing as
251 * "ready", it means "is _current already added back to the
252 * queue such that we don't want to re-add it".
253 */
254 bool queued = z_is_thread_queued(_current);
255 bool active = !z_is_thread_prevented_from_running(_current);
256
257 if (thread == NULL) {
258 thread = _current_cpu->idle_thread;
259 }
260
261 if (active) {
262 int32_t cmp = z_sched_prio_cmp(_current, thread);
263
264 /* Ties only switch if state says we yielded */
265 if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
266 thread = _current;
267 }
268
269 if (!should_preempt(thread, _current_cpu->swap_ok)) {
270 thread = _current;
271 }
272 }
273
274 /* Put _current back into the queue */
275 if ((thread != _current) && active &&
276 !z_is_idle_thread_object(_current) && !queued) {
277 queue_thread(_current);
278 }
279
280 /* Take the new _current out of the queue */
281 if (z_is_thread_queued(thread)) {
282 dequeue_thread(thread);
283 }
284
285 _current_cpu->swap_ok = false;
286 return thread;
287 #endif /* CONFIG_SMP */
288 }
289
290 void move_thread_to_end_of_prio_q(struct k_thread *thread)
291 {
292 if (z_is_thread_queued(thread)) {
293 dequeue_thread(thread);
294 }
295 queue_thread(thread);
296 update_cache(thread == _current);
297 }
298
299 /* Track cooperative threads preempted by metairqs so we can return to
300 * them specifically. Called at the moment a new thread has been
301 * selected to run.
302 */
303 static void update_metairq_preempt(struct k_thread *thread)
304 {
305 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
306 (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
307 if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
308 !thread_is_preemptible(_current)) {
309 /* Record new preemption */
310 _current_cpu->metairq_preempted = _current;
311 } else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
312 /* Returning from existing preemption */
313 _current_cpu->metairq_preempted = NULL;
314 }
315 #else
316 ARG_UNUSED(thread);
317 #endif
318 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
319 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
320 */
321 }
322
323 static void update_cache(int preempt_ok)
324 {
325 #ifndef CONFIG_SMP
326 struct k_thread *thread = next_up();
327
328 if (should_preempt(thread, preempt_ok)) {
329 #ifdef CONFIG_TIMESLICING
330 if (thread != _current) {
331 z_reset_time_slice(thread);
332 }
333 #endif /* CONFIG_TIMESLICING */
334 update_metairq_preempt(thread);
335 _kernel.ready_q.cache = thread;
336 } else {
337 _kernel.ready_q.cache = _current;
338 }
339
340 #else
341 /* The way this works is that the CPU record keeps its
342 * "cooperative swapping is OK" flag until the next reschedule
343 * call or context switch. It doesn't need to be tracked per
344 * thread because if the thread gets preempted for whatever
345 * reason the scheduler will make the same decision anyway.
346 */
347 _current_cpu->swap_ok = preempt_ok;
348 #endif /* CONFIG_SMP */
349 }
350
351 static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
352 {
353 /* Returns pointer to _cpu if the thread is currently running on
354 * another CPU. There are more scalable designs to answer this
355 * question in constant time, but this is fine for now.
356 */
357 #ifdef CONFIG_SMP
358 int currcpu = _current_cpu->id;
359
360 unsigned int num_cpus = arch_num_cpus();
361
362 for (int i = 0; i < num_cpus; i++) {
363 if ((i != currcpu) &&
364 (_kernel.cpus[i].current == thread)) {
365 return &_kernel.cpus[i];
366 }
367 }
368 #endif /* CONFIG_SMP */
369 ARG_UNUSED(thread);
370 return NULL;
371 }
372
373 static void ready_thread(struct k_thread *thread)
374 {
375 #ifdef CONFIG_KERNEL_COHERENCE
376 __ASSERT_NO_MSG(arch_mem_coherent(thread));
377 #endif /* CONFIG_KERNEL_COHERENCE */
378
379 /* If the thread is already queued, do not try to add it to the
380 * run queue again
381 */
382 if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
383 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
384
385 queue_thread(thread);
386 update_cache(0);
387
388 flag_ipi(ipi_mask_create(thread));
389 }
390 }
391
392 void z_ready_thread_locked(struct k_thread *thread)
393 {
394 if (thread_active_elsewhere(thread) == NULL) {
395 ready_thread(thread);
396 }
397 }
398
399 void z_ready_thread(struct k_thread *thread)
400 {
401 K_SPINLOCK(&_sched_spinlock) {
402 if (thread_active_elsewhere(thread) == NULL) {
403 ready_thread(thread);
404 }
405 }
406 }
407
408 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
409 {
410 K_SPINLOCK(&_sched_spinlock) {
411 move_thread_to_end_of_prio_q(thread);
412 }
413 }
414
415 void z_sched_start(struct k_thread *thread)
416 {
417 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
418
419 if (z_has_thread_started(thread)) {
420 k_spin_unlock(&_sched_spinlock, key);
421 return;
422 }
423
424 z_mark_thread_as_started(thread);
425 ready_thread(thread);
426 z_reschedule(&_sched_spinlock, key);
427 }
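/* Illustrative sketch: z_sched_start() is the path behind
 * k_thread_start() for threads created with a K_FOREVER start delay.
 * Hypothetical application usage (my_entry is an assumed entry point):
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	struct k_thread my_thread;
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      5, 0, K_FOREVER);
 *
 *	k_thread_start(tid);    (made ready via z_sched_start())
 */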
428
429 /* Spins in ISR context, waiting for a thread known to be running on
430 * another CPU to catch the IPI we sent and halt. Note that we check
431 * for ourselves being asynchronously halted first to prevent simple
432 * deadlocks (but not complex ones involving cycles of 3+ threads!).
433 * Acts to release the provided lock before returning.
434 */
435 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
436 {
437 if (is_halting(_current)) {
438 halt_thread(_current,
439 is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
440 }
441 k_spin_unlock(&_sched_spinlock, key);
442 while (is_halting(thread)) {
443 unsigned int k = arch_irq_lock();
444
445 arch_spin_relax(); /* Requires interrupts be masked */
446 arch_irq_unlock(k);
447 }
448 }
449
450 /* Shared handler for k_thread_{suspend,abort}().  Called with the
451 * scheduler lock held and the key passed in; the lock may be released
452 * and reacquired internally, and is released before any return, which
453 * may happen after a context switch (aborting _current will,
454 * obviously, not return at all).
455 */
456 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
457 bool terminate)
458 {
459 _wait_q_t *wq = &thread->join_queue;
460 #ifdef CONFIG_SMP
461 wq = terminate ? wq : &thread->halt_queue;
462 #endif
463
464 /* If the target is a thread running on another CPU, flag and
465 * poke it (note that we might spin to wait, so a true
466 * synchronous IPI is needed here, not deferred!); it will
467 * halt itself in the IPI handler. Otherwise it's unscheduled,
468 * so we can clean it up directly.
469 */
470
471 struct _cpu *cpu = thread_active_elsewhere(thread);
472
473 if (cpu != NULL) {
474 thread->base.thread_state |= (terminate ? _THREAD_ABORTING
475 : _THREAD_SUSPENDING);
476 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
477 #ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
478 arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
479 #else
480 arch_sched_broadcast_ipi();
481 #endif
482 #endif
483 if (arch_is_in_isr()) {
484 thread_halt_spin(thread, key);
485 } else {
486 add_to_waitq_locked(_current, wq);
487 z_swap(&_sched_spinlock, key);
488 }
489 } else {
490 halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
491 if ((thread == _current) && !arch_is_in_isr()) {
492 z_swap(&_sched_spinlock, key);
493 __ASSERT(!terminate, "aborted _current back from dead");
494 } else {
495 k_spin_unlock(&_sched_spinlock, key);
496 }
497 }
498 /* NOTE: the scheduler lock has been released. Don't put
499 * logic here, it's likely to be racy/deadlocky even if you
500 * re-take the lock!
501 */
502 }
503
504
505 void z_impl_k_thread_suspend(k_tid_t thread)
506 {
507 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
508
509 (void)z_abort_thread_timeout(thread);
510
511 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
512
513 if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {
514
515 /* The target thread is already suspended. Nothing to do. */
516
517 k_spin_unlock(&_sched_spinlock, key);
518 return;
519 }
520
521 z_thread_halt(thread, key, false);
522
523 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
524 }
525
526 #ifdef CONFIG_USERSPACE
527 static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
528 {
529 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
530 z_impl_k_thread_suspend(thread);
531 }
532 #include <zephyr/syscalls/k_thread_suspend_mrsh.c>
533 #endif /* CONFIG_USERSPACE */
534
535 void z_impl_k_thread_resume(k_tid_t thread)
536 {
537 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
538
539 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
540
541 /* Do not try to resume a thread that was not suspended */
542 if (!z_is_thread_suspended(thread)) {
543 k_spin_unlock(&_sched_spinlock, key);
544 return;
545 }
546
547 z_mark_thread_as_not_suspended(thread);
548 ready_thread(thread);
549
550 z_reschedule(&_sched_spinlock, key);
551
552 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
553 }
554
555 #ifdef CONFIG_USERSPACE
556 static inline void z_vrfy_k_thread_resume(k_tid_t thread)
557 {
558 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
559 z_impl_k_thread_resume(thread);
560 }
561 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
562 #endif /* CONFIG_USERSPACE */
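/* Illustrative pairing of the suspend/resume handlers above
 * (worker_tid is hypothetical):
 *
 *	k_thread_suspend(worker_tid);   (thread no longer scheduled)
 *	...
 *	k_thread_resume(worker_tid);    (thread is runnable again)
 *
 * Suspending an already-suspended thread and resuming a thread that
 * was never suspended are both no-ops, per the early returns above.
 */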
563
564 static _wait_q_t *pended_on_thread(struct k_thread *thread)
565 {
566 __ASSERT_NO_MSG(thread->base.pended_on);
567
568 return thread->base.pended_on;
569 }
570
571 static void unready_thread(struct k_thread *thread)
572 {
573 if (z_is_thread_queued(thread)) {
574 dequeue_thread(thread);
575 }
576 update_cache(thread == _current);
577 }
578
579 /* _sched_spinlock must be held */
580 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
581 {
582 unready_thread(thread);
583 z_mark_thread_as_pending(thread);
584
585 SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
586
587 if (wait_q != NULL) {
588 thread->base.pended_on = wait_q;
589 _priq_wait_add(&wait_q->waitq, thread);
590 }
591 }
592
593 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
594 {
595 if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
596 z_add_thread_timeout(thread, timeout);
597 }
598 }
599
600 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
601 k_timeout_t timeout)
602 {
603 #ifdef CONFIG_KERNEL_COHERENCE
604 __ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
605 #endif /* CONFIG_KERNEL_COHERENCE */
606 add_to_waitq_locked(thread, wait_q);
607 add_thread_timeout(thread, timeout);
608 }
609
610 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
611 k_timeout_t timeout)
612 {
613 __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
614 K_SPINLOCK(&_sched_spinlock) {
615 pend_locked(thread, wait_q, timeout);
616 }
617 }
618
619 static inline void unpend_thread_no_timeout(struct k_thread *thread)
620 {
621 _priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
622 z_mark_thread_as_not_pending(thread);
623 thread->base.pended_on = NULL;
624 }
625
626 void z_unpend_thread_no_timeout(struct k_thread *thread)
627 {
628 K_SPINLOCK(&_sched_spinlock) {
629 if (thread->base.pended_on != NULL) {
630 unpend_thread_no_timeout(thread);
631 }
632 }
633 }
634
635 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
636 {
637 K_SPINLOCK(&_sched_spinlock) {
638 bool killed = (thread->base.thread_state &
639 (_THREAD_DEAD | _THREAD_ABORTING));
640
641 #ifdef CONFIG_EVENTS
642 bool do_nothing = thread->no_wake_on_timeout && is_timeout;
643
644 thread->no_wake_on_timeout = false;
645
646 if (do_nothing) {
647 continue;
648 }
649 #endif /* CONFIG_EVENTS */
650
651 if (!killed) {
652 /* The thread is not being killed */
653 if (thread->base.pended_on != NULL) {
654 unpend_thread_no_timeout(thread);
655 }
656 z_mark_thread_as_started(thread);
657 if (is_timeout) {
658 z_mark_thread_as_not_suspended(thread);
659 }
660 ready_thread(thread);
661 }
662 }
663
664 }
665
666 #ifdef CONFIG_SYS_CLOCK_EXISTS
667 /* Timeout handler for *_thread_timeout() APIs */
668 void z_thread_timeout(struct _timeout *timeout)
669 {
670 struct k_thread *thread = CONTAINER_OF(timeout,
671 struct k_thread, base.timeout);
672
673 z_sched_wake_thread(thread, true);
674 }
675 #endif /* CONFIG_SYS_CLOCK_EXISTS */
676
677 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
678 _wait_q_t *wait_q, k_timeout_t timeout)
679 {
680 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
681 pending_current = _current;
682 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
683 __ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
684
685 /* We do a "lock swap" prior to calling z_swap(), such that
686 * the caller's lock gets released as desired. But we ensure
687 * that we hold the scheduler lock and leave local interrupts
688 * masked until we reach the context switch. z_swap() itself
689 * has similar code; the duplication is because it's a legacy
690 * API that doesn't expect to be called with scheduler lock
691 * held.
692 */
693 (void) k_spin_lock(&_sched_spinlock);
694 pend_locked(_current, wait_q, timeout);
695 k_spin_release(lock);
696 return z_swap(&_sched_spinlock, key);
697 }
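/* Illustrative sketch of the "lock swap" pattern above, for a
 * hypothetical blocking primitive with its own spinlock and wait queue
 * (my_lock, my_waitq and resource_available() are assumptions):
 *
 *	int my_take(k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		if (resource_available()) {
 *			k_spin_unlock(&my_lock, key);
 *			return 0;
 *		}
 *
 *		(releases my_lock, pends _current on my_waitq, and returns
 *		 the waker's swap_retval, or -EAGAIN on timeout)
 *		return z_pend_curr(&my_lock, key, &my_waitq, timeout);
 *	}
 */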
698
699 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
700 {
701 struct k_thread *thread = NULL;
702
703 K_SPINLOCK(&_sched_spinlock) {
704 thread = _priq_wait_best(&wait_q->waitq);
705
706 if (thread != NULL) {
707 unpend_thread_no_timeout(thread);
708 }
709 }
710
711 return thread;
712 }
713
714 struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
715 {
716 struct k_thread *thread = NULL;
717
718 K_SPINLOCK(&_sched_spinlock) {
719 thread = _priq_wait_best(&wait_q->waitq);
720
721 if (thread != NULL) {
722 unpend_thread_no_timeout(thread);
723 (void)z_abort_thread_timeout(thread);
724 }
725 }
726
727 return thread;
728 }
729
730 void z_unpend_thread(struct k_thread *thread)
731 {
732 z_unpend_thread_no_timeout(thread);
733 (void)z_abort_thread_timeout(thread);
734 }
735
736 /* Priority set utility that does no rescheduling, it just changes the
737 * run queue state, returning true if a reschedule is needed later.
738 */
739 bool z_thread_prio_set(struct k_thread *thread, int prio)
740 {
741 bool need_sched = false;
742 int old_prio = thread->base.prio;
743
744 K_SPINLOCK(&_sched_spinlock) {
745 need_sched = z_is_thread_ready(thread);
746
747 if (need_sched) {
748 if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
749 dequeue_thread(thread);
750 thread->base.prio = prio;
751 queue_thread(thread);
752
753 if (old_prio > prio) {
754 flag_ipi(ipi_mask_create(thread));
755 }
756 } else {
757 /*
758 * This is a running thread on SMP. Update its
759 * priority, but do not requeue it. An IPI is
760 * needed if the priority is both being lowered
761 * and it is running on another CPU.
762 */
763
764 thread->base.prio = prio;
765
766 struct _cpu *cpu;
767
768 cpu = thread_active_elsewhere(thread);
769 if ((cpu != NULL) && (old_prio < prio)) {
770 flag_ipi(IPI_CPU_MASK(cpu->id));
771 }
772 }
773
774 update_cache(1);
775 } else {
776 thread->base.prio = prio;
777 }
778 }
779
780 SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
781
782 return need_sched;
783 }
784
785 static inline bool resched(uint32_t key)
786 {
787 #ifdef CONFIG_SMP
788 _current_cpu->swap_ok = 0;
789 #endif /* CONFIG_SMP */
790
791 return arch_irq_unlocked(key) && !arch_is_in_isr();
792 }
793
794 /*
795 * Check if the next ready thread is the same as the current thread
796 * and save the trip if true.
797 */
798 static inline bool need_swap(void)
799 {
800 /* the SMP case will be handled in C based z_swap() */
801 #ifdef CONFIG_SMP
802 return true;
803 #else
804 struct k_thread *new_thread;
805
806 /* Check if the next ready thread is the same as the current thread */
807 new_thread = _kernel.ready_q.cache;
808 return new_thread != _current;
809 #endif /* CONFIG_SMP */
810 }
811
812 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
813 {
814 if (resched(key.key) && need_swap()) {
815 z_swap(lock, key);
816 } else {
817 k_spin_unlock(lock, key);
818 signal_pending_ipi();
819 }
820 }
821
822 void z_reschedule_irqlock(uint32_t key)
823 {
824 if (resched(key) && need_swap()) {
825 z_swap_irqlock(key);
826 } else {
827 irq_unlock(key);
828 signal_pending_ipi();
829 }
830 }
831
832 void k_sched_lock(void)
833 {
834 K_SPINLOCK(&_sched_spinlock) {
835 SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
836
837 z_sched_lock();
838 }
839 }
840
841 void k_sched_unlock(void)
842 {
843 K_SPINLOCK(&_sched_spinlock) {
844 __ASSERT(_current->base.sched_locked != 0U, "");
845 __ASSERT(!arch_is_in_isr(), "");
846
847 ++_current->base.sched_locked;
848 update_cache(0);
849 }
850
851 LOG_DBG("scheduler unlocked (%p:%d)",
852 _current, _current->base.sched_locked);
853
854 SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
855
856 z_reschedule_unlocked();
857 }
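/* Illustrative usage of the scheduler lock above: between the calls
 * the current thread cannot be preempted by other threads (ISRs still
 * run), and calls may nest.
 *
 *	k_sched_lock();
 *	update_shared_state();   (hypothetical, must not block)
 *	k_sched_unlock();
 */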
858
859 struct k_thread *z_swap_next_thread(void)
860 {
861 #ifdef CONFIG_SMP
862 struct k_thread *ret = next_up();
863
864 if (ret == _current) {
865 /* When not swapping, have to signal IPIs here. In
866 * the context switch case it must happen later, after
867 * _current gets requeued.
868 */
869 signal_pending_ipi();
870 }
871 return ret;
872 #else
873 return _kernel.ready_q.cache;
874 #endif /* CONFIG_SMP */
875 }
876
877 #ifdef CONFIG_USE_SWITCH
878 /* Just a wrapper around _current = xxx with tracing */
879 static inline void set_current(struct k_thread *new_thread)
880 {
881 z_thread_mark_switched_out();
882 _current_cpu->current = new_thread;
883 }
884
885 /**
886 * @brief Determine next thread to execute upon completion of an interrupt
887 *
888 * Thread preemption is performed by context switching after the completion
889 * of a non-recursed interrupt. This function determines which thread to
890 * switch to if any. This function accepts as @p interrupted either:
891 *
892 * - The handle for the interrupted thread in which case the thread's context
893 * must already be fully saved and ready to be picked up by a different CPU.
894 *
895 * - NULL if more work is required to fully save the thread's state after
896 * it is known that a new thread is to be scheduled. It is up to the caller
897 * to store the handle resulting from the thread that is being switched out
898 * in that thread's "switch_handle" field after its
899 * context has fully been saved, following the same requirements as with
900 * the @ref arch_switch() function.
901 *
902 * If a new thread needs to be scheduled then its handle is returned.
903 * Otherwise the same value provided as @p interrupted is returned back.
904 * Those handles are the same opaque types used by the @ref arch_switch()
905 * function.
906 *
907 * @warning
908 * The @ref _current value may have changed after this call and not refer
909 * to the interrupted thread anymore. It might be necessary to make a local
910 * copy before calling this function.
911 *
912 * @param interrupted Handle for the thread that was interrupted or NULL.
913 * @retval Handle for the next thread to execute, or @p interrupted when
914 * no new thread is to be scheduled.
915 */
916 void *z_get_next_switch_handle(void *interrupted)
917 {
918 z_check_stack_sentinel();
919
920 #ifdef CONFIG_SMP
921 void *ret = NULL;
922
923 K_SPINLOCK(&_sched_spinlock) {
924 struct k_thread *old_thread = _current, *new_thread;
925
926 if (IS_ENABLED(CONFIG_SMP)) {
927 old_thread->switch_handle = NULL;
928 }
929 new_thread = next_up();
930
931 z_sched_usage_switch(new_thread);
932
933 if (old_thread != new_thread) {
934 uint8_t cpu_id;
935
936 update_metairq_preempt(new_thread);
937 z_sched_switch_spin(new_thread);
938 arch_cohere_stacks(old_thread, interrupted, new_thread);
939
940 _current_cpu->swap_ok = 0;
941 cpu_id = arch_curr_cpu()->id;
942 new_thread->base.cpu = cpu_id;
943 set_current(new_thread);
944
945 #ifdef CONFIG_TIMESLICING
946 z_reset_time_slice(new_thread);
947 #endif /* CONFIG_TIMESLICING */
948
949 #ifdef CONFIG_SPIN_VALIDATE
950 /* Changed _current! Update the spinlock
951 * bookkeeping so the validation doesn't get
952 * confused when the "wrong" thread tries to
953 * release the lock.
954 */
955 z_spin_lock_set_owner(&_sched_spinlock);
956 #endif /* CONFIG_SPIN_VALIDATE */
957
958 /* A queued (runnable) old/current thread
959 * needs to be added back to the run queue
960 * here, and atomically with its switch handle
961 * being set below. This is safe now, as we
962 * will not return into it.
963 */
964 if (z_is_thread_queued(old_thread)) {
965 #ifdef CONFIG_SCHED_IPI_CASCADE
966 if ((new_thread->base.cpu_mask != -1) &&
967 (old_thread->base.cpu_mask != BIT(cpu_id))) {
968 flag_ipi(ipi_mask_create(old_thread));
969 }
970 #endif
971 runq_add(old_thread);
972 }
973 }
974 old_thread->switch_handle = interrupted;
975 ret = new_thread->switch_handle;
976 if (IS_ENABLED(CONFIG_SMP)) {
977 /* Active threads MUST have a null here */
978 new_thread->switch_handle = NULL;
979 }
980 }
981 signal_pending_ipi();
982 return ret;
983 #else
984 z_sched_usage_switch(_kernel.ready_q.cache);
985 _current->switch_handle = interrupted;
986 set_current(_kernel.ready_q.cache);
987 return _current->switch_handle;
988 #endif /* CONFIG_SMP */
989 }
990 #endif /* CONFIG_USE_SWITCH */
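/* Illustrative sketch (pseudo-arch code, not a real port) of the
 * contract documented above for z_get_next_switch_handle():
 *
 *	void *next = z_get_next_switch_handle(interrupted_handle);
 *
 *	if (next != interrupted_handle) {
 *		(restore the register/stack context that 'next' names,
 *		 exactly as arch_switch() would)
 *	}
 */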
991
992 int z_unpend_all(_wait_q_t *wait_q)
993 {
994 int need_sched = 0;
995 struct k_thread *thread;
996
997 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
998 z_unpend_thread(thread);
999 z_ready_thread(thread);
1000 need_sched = 1;
1001 }
1002
1003 return need_sched;
1004 }
1005
1006 void init_ready_q(struct _ready_q *ready_q)
1007 {
1008 #if defined(CONFIG_SCHED_SCALABLE)
1009 ready_q->runq = (struct _priq_rb) {
1010 .tree = {
1011 .lessthan_fn = z_priq_rb_lessthan,
1012 }
1013 };
1014 #elif defined(CONFIG_SCHED_MULTIQ)
1015 for (int i = 0; i < ARRAY_SIZE(ready_q->runq.queues); i++) {
1016 sys_dlist_init(&ready_q->runq.queues[i]);
1017 }
1018 #else
1019 sys_dlist_init(&ready_q->runq);
1020 #endif
1021 }
1022
1023 void z_sched_init(void)
1024 {
1025 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1026 for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
1027 init_ready_q(&_kernel.cpus[i].ready_q);
1028 }
1029 #else
1030 init_ready_q(&_kernel.ready_q);
1031 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
1032 }
1033
1034 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
1035 {
1036 /*
1037 * Use NULL, since we cannot know what the entry point is (we do not
1038 * keep track of it) and idle cannot change its priority.
1039 */
1040 Z_ASSERT_VALID_PRIO(prio, NULL);
1041
1042 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
1043
1044 if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
1045 (_current->base.sched_locked == 0U))) {
1046 z_reschedule_unlocked();
1047 }
1048 }
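/* Illustrative usage of the implementation above (tid is
 * hypothetical); note that a numerically larger value is a lower
 * priority:
 *
 *	int prio = k_thread_priority_get(tid);
 *
 *	k_thread_priority_set(tid, prio + 1);   (drop one priority level)
 */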
1049
1050 #ifdef CONFIG_USERSPACE
1051 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
1052 {
1053 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1054 K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
1055 "invalid thread priority %d", prio));
1056 #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
1057 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
1058 "thread priority may only be downgraded (%d < %d)",
1059 prio, thread->base.prio));
1060 #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
1061 z_impl_k_thread_priority_set(thread, prio);
1062 }
1063 #include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
1064 #endif /* CONFIG_USERSPACE */
1065
1066 #ifdef CONFIG_SCHED_DEADLINE
1067 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1068 {
1069
1070 deadline = CLAMP(deadline, 0, INT_MAX);
1071
1072 struct k_thread *thread = tid;
1073 int32_t newdl = k_cycle_get_32() + deadline;
1074
1075 /* The prio_deadline field changes the sorting order, so can't
1076 * change it while the thread is in the run queue (dlists
1077 * actually are benign as long as we requeue it before we
1078 * release the lock, but an rbtree will blow up if we break
1079 * sorting!)
1080 */
1081 K_SPINLOCK(&_sched_spinlock) {
1082 if (z_is_thread_queued(thread)) {
1083 dequeue_thread(thread);
1084 thread->base.prio_deadline = newdl;
1085 queue_thread(thread);
1086 } else {
1087 thread->base.prio_deadline = newdl;
1088 }
1089 }
1090 }
1091
1092 #ifdef CONFIG_USERSPACE
1093 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1094 {
1095 struct k_thread *thread = tid;
1096
1097 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1098 K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1099 "invalid thread deadline %d",
1100 (int)deadline));
1101
1102 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1103 }
1104 #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
1105 #endif /* CONFIG_USERSPACE */
1106 #endif /* CONFIG_SCHED_DEADLINE */
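/* Illustrative usage of the EDF API above (CONFIG_SCHED_DEADLINE):
 * deadlines are relative, in hardware cycles from "now", and only
 * order threads that share the same static priority.
 *
 *	(hypothetical 10 ms relative deadline for the current thread)
 *	k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 */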
1107
1108 bool k_can_yield(void)
1109 {
1110 return !(k_is_pre_kernel() || k_is_in_isr() ||
1111 z_is_idle_thread_object(_current));
1112 }
1113
1114 void z_impl_k_yield(void)
1115 {
1116 __ASSERT(!arch_is_in_isr(), "");
1117
1118 SYS_PORT_TRACING_FUNC(k_thread, yield);
1119
1120 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1121
1122 if (!IS_ENABLED(CONFIG_SMP) ||
1123 z_is_thread_queued(_current)) {
1124 dequeue_thread(_current);
1125 }
1126 queue_thread(_current);
1127 update_cache(1);
1128 z_swap(&_sched_spinlock, key);
1129 }
1130
1131 #ifdef CONFIG_USERSPACE
1132 static inline void z_vrfy_k_yield(void)
1133 {
1134 z_impl_k_yield();
1135 }
1136 #include <zephyr/syscalls/k_yield_mrsh.c>
1137 #endif /* CONFIG_USERSPACE */
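/* Illustrative usage of k_can_yield()/k_yield() above: a long loop
 * that yields to equal-priority threads only when yielding is legal
 * (not pre-kernel, not in an ISR, not the idle thread).
 *
 *	while (work_remaining()) {      (hypothetical condition/helper)
 *		do_some_work();
 *		if (k_can_yield()) {
 *			k_yield();
 *		}
 *	}
 */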
1138
1139 static int32_t z_tick_sleep(k_ticks_t ticks)
1140 {
1141 uint32_t expected_wakeup_ticks;
1142
1143 __ASSERT(!arch_is_in_isr(), "");
1144
1145 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
1146
1147 /* a wait of 0 ticks is treated as a 'yield' */
1148 if (ticks == 0) {
1149 k_yield();
1150 return 0;
1151 }
1152
1153 if (Z_TICK_ABS(ticks) <= 0) {
1154 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1155 } else {
1156 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1157 }
1158
1159 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
1160 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1161
1162 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1163 pending_current = _current;
1164 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
1165 unready_thread(_current);
1166 z_add_thread_timeout(_current, timeout);
1167 z_mark_thread_as_suspended(_current);
1168
1169 (void)z_swap(&_sched_spinlock, key);
1170
1171 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1172
1173 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
1174 if (ticks > 0) {
1175 return ticks;
1176 }
1177
1178 return 0;
1179 }
1180
1181 int32_t z_impl_k_sleep(k_timeout_t timeout)
1182 {
1183 k_ticks_t ticks;
1184
1185 __ASSERT(!arch_is_in_isr(), "");
1186
1187 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1188
1189 /* in case of K_FOREVER, we suspend */
1190 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
1191
1192 k_thread_suspend(_current);
1193 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1194
1195 return (int32_t) K_TICKS_FOREVER;
1196 }
1197
1198 ticks = timeout.ticks;
1199
1200 ticks = z_tick_sleep(ticks);
1201
1202 int32_t ret = k_ticks_to_ms_ceil64(ticks);
1203
1204 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1205
1206 return ret;
1207 }
1208
1209 #ifdef CONFIG_USERSPACE
1210 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1211 {
1212 return z_impl_k_sleep(timeout);
1213 }
1214 #include <zephyr/syscalls/k_sleep_mrsh.c>
1215 #endif /* CONFIG_USERSPACE */
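/* Illustrative usage of k_sleep() above: the return value is the time
 * remaining, in ms, when the sleep is cut short by k_wakeup(), else 0.
 *
 *	int32_t left = k_sleep(K_MSEC(500));
 *
 *	if (left > 0) {
 *		(woken early; roughly 'left' ms of the 500 were unused)
 *	}
 */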
1216
1217 int32_t z_impl_k_usleep(int32_t us)
1218 {
1219 int32_t ticks;
1220
1221 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1222
1223 ticks = k_us_to_ticks_ceil64(us);
1224 ticks = z_tick_sleep(ticks);
1225
1226 int32_t ret = k_ticks_to_us_ceil64(ticks);
1227
1228 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1229
1230 return ret;
1231 }
1232
1233 #ifdef CONFIG_USERSPACE
1234 static inline int32_t z_vrfy_k_usleep(int32_t us)
1235 {
1236 return z_impl_k_usleep(us);
1237 }
1238 #include <zephyr/syscalls/k_usleep_mrsh.c>
1239 #endif /* CONFIG_USERSPACE */
1240
1241 void z_impl_k_wakeup(k_tid_t thread)
1242 {
1243 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1244
1245 if (z_is_thread_pending(thread)) {
1246 return;
1247 }
1248
1249 if (z_abort_thread_timeout(thread) < 0) {
1250 /* Might have just been sleeping forever */
1251 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1252 return;
1253 }
1254 }
1255
1256 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1257
1258 z_mark_thread_as_not_suspended(thread);
1259
1260 if (thread_active_elsewhere(thread) == NULL) {
1261 ready_thread(thread);
1262 }
1263
1264 if (arch_is_in_isr()) {
1265 k_spin_unlock(&_sched_spinlock, key);
1266 } else {
1267 z_reschedule(&_sched_spinlock, key);
1268 }
1269 }
1270
1271 #ifdef CONFIG_USERSPACE
1272 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1273 {
1274 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1275 z_impl_k_wakeup(thread);
1276 }
1277 #include <zephyr/syscalls/k_wakeup_mrsh.c>
1278 #endif /* CONFIG_USERSPACE */
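/* Illustrative pairing for k_wakeup() above (sleeper_tid is
 * hypothetical): if the target is mid-k_sleep(), its timeout is
 * aborted and it becomes ready immediately; a thread pending on a
 * kernel object is left alone.
 *
 *	k_wakeup(sleeper_tid);
 */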
1279
1280 k_tid_t z_impl_k_sched_current_thread_query(void)
1281 {
1282 #ifdef CONFIG_SMP
1283 /* In SMP, _current is a field read from _current_cpu, which
1284 * can race with preemption before it is read. We must lock
1285 * local interrupts when reading it.
1286 */
1287 unsigned int k = arch_irq_lock();
1288 #endif /* CONFIG_SMP */
1289
1290 k_tid_t ret = _current_cpu->current;
1291
1292 #ifdef CONFIG_SMP
1293 arch_irq_unlock(k);
1294 #endif /* CONFIG_SMP */
1295 return ret;
1296 }
1297
1298 #ifdef CONFIG_USERSPACE
1299 static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1300 {
1301 return z_impl_k_sched_current_thread_query();
1302 }
1303 #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
1304 #endif /* CONFIG_USERSPACE */
1305
1306 static inline void unpend_all(_wait_q_t *wait_q)
1307 {
1308 struct k_thread *thread;
1309
1310 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
1311 unpend_thread_no_timeout(thread);
1312 (void)z_abort_thread_timeout(thread);
1313 arch_thread_return_value_set(thread, 0);
1314 ready_thread(thread);
1315 }
1316 }
1317
1318 #ifdef CONFIG_THREAD_ABORT_HOOK
1319 extern void thread_abort_hook(struct k_thread *thread);
1320 #endif /* CONFIG_THREAD_ABORT_HOOK */
1321
1322 /**
1323 * @brief Dequeues the specified thread
1324 *
1325 * Dequeues the specified thread and moves it into the specified new state.
1326 *
1327 * @param thread Identify the thread to halt
1328 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1329 */
1330 static void halt_thread(struct k_thread *thread, uint8_t new_state)
1331 {
1332 bool dummify = false;
1333
1334 /* We hold the lock, and the thread is known not to be running
1335 * anywhere.
1336 */
1337 if ((thread->base.thread_state & new_state) == 0U) {
1338 thread->base.thread_state |= new_state;
1339 if (z_is_thread_queued(thread)) {
1340 dequeue_thread(thread);
1341 }
1342
1343 if (new_state == _THREAD_DEAD) {
1344 if (thread->base.pended_on != NULL) {
1345 unpend_thread_no_timeout(thread);
1346 }
1347 (void)z_abort_thread_timeout(thread);
1348 unpend_all(&thread->join_queue);
1349
1350 /* Edge case: aborting _current from within an
1351 * ISR that preempted it requires clearing the
1352 * _current pointer so the upcoming context
1353 * switch doesn't clobber the now-freed
1354 * memory
1355 */
1356 if (thread == _current && arch_is_in_isr()) {
1357 dummify = true;
1358 }
1359 }
1360 #ifdef CONFIG_SMP
1361 unpend_all(&thread->halt_queue);
1362 #endif /* CONFIG_SMP */
1363 update_cache(1);
1364
1365 if (new_state == _THREAD_SUSPENDED) {
1366 clear_halting(thread);
1367 return;
1368 }
1369
1370 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1371 arch_float_disable(thread);
1372 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
1373
1374 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1375
1376 z_thread_monitor_exit(thread);
1377 #ifdef CONFIG_THREAD_ABORT_HOOK
1378 thread_abort_hook(thread);
1379 #endif /* CONFIG_THREAD_ABORT_HOOK */
1380
1381 #ifdef CONFIG_OBJ_CORE_THREAD
1382 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
1383 k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1384 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
1385 k_obj_core_unlink(K_OBJ_CORE(thread));
1386 #endif /* CONFIG_OBJ_CORE_THREAD */
1387
1388 #ifdef CONFIG_USERSPACE
1389 z_mem_domain_exit_thread(thread);
1390 k_thread_perms_all_clear(thread);
1391 k_object_uninit(thread->stack_obj);
1392 k_object_uninit(thread);
1393 #endif /* CONFIG_USERSPACE */
1394
1395 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1396 k_thread_abort_cleanup(thread);
1397 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1398
1399 /* Do this "set _current to dummy" step last so that
1400 * subsystems above can rely on _current being
1401 * unchanged. Disabled for posix as that arch
1402 * continues to use the _current pointer in its swap
1403 * code. Note that we must leave a non-null switch
1404 * handle for any threads spinning in join() (this can
1405 * never be used, as our thread is flagged dead, but
1406 * it must not be NULL otherwise join can deadlock).
1407 */
1408 if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
1409 #ifdef CONFIG_USE_SWITCH
1410 _current->switch_handle = _current;
1411 #endif
1412 z_dummy_thread_init(&_thread_dummy);
1413
1414 }
1415
1416 /* Finally update the halting thread state, on which
1417 * other CPUs might be spinning (see
1418 * thread_halt_spin()).
1419 */
1420 clear_halting(thread);
1421 }
1422 }
1423
1424 void z_thread_abort(struct k_thread *thread)
1425 {
1426 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1427
1428 if (z_is_thread_essential(thread)) {
1429 k_spin_unlock(&_sched_spinlock, key);
1430 __ASSERT(false, "aborting essential thread %p", thread);
1431 k_panic();
1432 return;
1433 }
1434
1435 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1436 k_spin_unlock(&_sched_spinlock, key);
1437 return;
1438 }
1439
1440 z_thread_halt(thread, key, true);
1441 }
1442
1443 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1444 void z_impl_k_thread_abort(k_tid_t thread)
1445 {
1446 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1447
1448 z_thread_abort(thread);
1449
1450 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1451
1452 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1453 }
1454 #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
1455
1456 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1457 {
1458 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1459 int ret;
1460
1461 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1462
1463 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1464 z_sched_switch_spin(thread);
1465 ret = 0;
1466 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1467 ret = -EBUSY;
1468 } else if ((thread == _current) ||
1469 (thread->base.pended_on == &_current->join_queue)) {
1470 ret = -EDEADLK;
1471 } else {
1472 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1473 add_to_waitq_locked(_current, &thread->join_queue);
1474 add_thread_timeout(_current, timeout);
1475
1476 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1477 ret = z_swap(&_sched_spinlock, key);
1478 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1479
1480 return ret;
1481 }
1482
1483 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1484
1485 k_spin_unlock(&_sched_spinlock, key);
1486 return ret;
1487 }
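/* Illustrative usage of k_thread_join() above (worker_tid is
 * hypothetical): wait up to one second for the thread to exit.
 *
 *	int ret = k_thread_join(worker_tid, K_SECONDS(1));
 *
 *	(ret == 0 once the thread exits, -EAGAIN on timeout, -EBUSY only
 *	 for K_NO_WAIT, -EDEADLK for a self-join or join cycle)
 */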
1488
1489 #ifdef CONFIG_USERSPACE
1490 /* Special case: don't oops if the thread is uninitialized. This is because
1491 * the initialization bit does double-duty for thread objects; if false, it
1492 * means either that the thread object is truly uninitialized or that the
1493 * thread ran and exited for some reason.
1494 *
1495 * Return true in this case indicating we should just do nothing and return
1496 * success to the caller.
1497 */
1498 static bool thread_obj_validate(struct k_thread *thread)
1499 {
1500 struct k_object *ko = k_object_find(thread);
1501 int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1502
1503 switch (ret) {
1504 case 0:
1505 return false;
1506 case -EINVAL:
1507 return true;
1508 default:
1509 #ifdef CONFIG_LOG
1510 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
1511 #endif /* CONFIG_LOG */
1512 K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
1513 }
1514 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1515 }
1516
1517 static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1518 k_timeout_t timeout)
1519 {
1520 if (thread_obj_validate(thread)) {
1521 return 0;
1522 }
1523
1524 return z_impl_k_thread_join(thread, timeout);
1525 }
1526 #include <zephyr/syscalls/k_thread_join_mrsh.c>
1527
1528 static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1529 {
1530 if (thread_obj_validate(thread)) {
1531 return;
1532 }
1533
1534 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
1535 "aborting essential thread %p", thread));
1536
1537 z_impl_k_thread_abort((struct k_thread *)thread);
1538 }
1539 #include <zephyr/syscalls/k_thread_abort_mrsh.c>
1540 #endif /* CONFIG_USERSPACE */
1541
1542 /*
1543 * future scheduler.h API implementations
1544 */
1545 bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1546 {
1547 struct k_thread *thread;
1548 bool ret = false;
1549
1550 K_SPINLOCK(&_sched_spinlock) {
1551 thread = _priq_wait_best(&wait_q->waitq);
1552
1553 if (thread != NULL) {
1554 z_thread_return_value_set_with_data(thread,
1555 swap_retval,
1556 swap_data);
1557 unpend_thread_no_timeout(thread);
1558 (void)z_abort_thread_timeout(thread);
1559 ready_thread(thread);
1560 ret = true;
1561 }
1562 }
1563
1564 return ret;
1565 }
1566
1567 int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1568 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1569 {
1570 int ret = z_pend_curr(lock, key, wait_q, timeout);
1571
1572 if (data != NULL) {
1573 *data = _current->base.swap_data;
1574 }
1575 return ret;
1576 }
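/* Illustrative sketch of the z_sched_wake()/z_sched_wait() pairing
 * above for a hypothetical primitive (my_lock, my_waitq and payload
 * are assumptions):
 *
 *	(waiter side, with my_lock held via 'key')
 *	void *msg;
 *	int ret = z_sched_wait(&my_lock, key, &my_waitq, K_FOREVER, &msg);
 *
 *	(waker side, handing one waiter a zero retval and a data pointer)
 *	if (!z_sched_wake(&my_waitq, 0, payload)) {
 *		(nobody was waiting)
 *	}
 */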
1577
1578 int z_sched_waitq_walk(_wait_q_t *wait_q,
1579 int (*func)(struct k_thread *, void *), void *data)
1580 {
1581 struct k_thread *thread;
1582 int status = 0;
1583
1584 K_SPINLOCK(&_sched_spinlock) {
1585 _WAIT_Q_FOR_EACH(wait_q, thread) {
1586
1587 /*
1588 * Invoke the callback function on each waiting thread
1589 * for as long as there are both waiting threads AND
1590 * it returns 0.
1591 */
1592
1593 status = func(thread, data);
1594 if (status != 0) {
1595 break;
1596 }
1597 }
1598 }
1599
1600 return status;
1601 }
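/* Illustrative walk callback for z_sched_waitq_walk() above (names
 * are hypothetical): count waiters without aborting the walk.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		ARG_UNUSED(thread);
 *		*(int *)data += 1;
 *		return 0;	(nonzero would stop the walk)
 *	}
 *
 *	int n = 0;
 *	(void)z_sched_waitq_walk(&my_waitq, count_waiter, &n);
 */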
1602