1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <wait_q.h>
10 #include <kthread.h>
11 #include <priority_q.h>
12 #include <kswap.h>
13 #include <ipi.h>
14 #include <kernel_arch_func.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/drivers/timer/system_timer.h>
17 #include <stdbool.h>
18 #include <kernel_internal.h>
19 #include <zephyr/logging/log.h>
20 #include <zephyr/sys/atomic.h>
21 #include <zephyr/sys/math_extras.h>
22 #include <zephyr/timing/timing.h>
23 #include <zephyr/sys/util.h>
24 
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26 
27 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
28 extern struct k_thread *pending_current;
29 #endif
30 
31 struct k_spinlock _sched_spinlock;
32 
33 /* Storage to "complete" the context switch from an invalid/incomplete thread
34  * context (ex: exiting an ISR that aborted _current)
35  */
36 __incoherent struct k_thread _thread_dummy;
37 
38 static ALWAYS_INLINE void update_cache(int preempt_ok);
39 static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
41 
42 
43 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
44 	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
45 	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
46 	     "threads.");
47 
48 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
49 {
50 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
51 	int cpu, m = thread->base.cpu_mask;
52 
53 	/* Edge case: it's legal per the API to "make runnable" a
54 	 * thread with all CPUs masked off (i.e. one that isn't
55 	 * actually runnable!).  Sort of a wart in the API and maybe
56 	 * we should address this in docs/assertions instead to avoid
57 	 * the extra test.
58 	 */
59 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
60 
61 	return &_kernel.cpus[cpu].ready_q.runq;
62 #else
63 	ARG_UNUSED(thread);
64 	return &_kernel.ready_q.runq;
65 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
66 }
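/* Worked example of the pin-only mapping above (illustrative sketch, not part
 * of this file; "my_thread" is hypothetical): pinning a thread to CPU 2 sets
 * base.cpu_mask to BIT(2) == 0x04, u32_count_trailing_zeros(0x04) returns 2,
 * and the thread is queued on _kernel.cpus[2].ready_q.runq.  The
 * all-masked-off edge case (cpu_mask == 0) falls back to CPU 0's queue.
 *
 *   k_thread_cpu_pin(&my_thread, 2);
 */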
67 
68 static ALWAYS_INLINE void *curr_cpu_runq(void)
69 {
70 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
71 	return &arch_curr_cpu()->ready_q.runq;
72 #else
73 	return &_kernel.ready_q.runq;
74 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
75 }
76 
77 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
78 {
79 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
80 
81 	_priq_run_add(thread_runq(thread), thread);
82 }
83 
84 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
85 {
86 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
87 
88 	_priq_run_remove(thread_runq(thread), thread);
89 }
90 
91 static ALWAYS_INLINE void runq_yield(void)
92 {
93 	_priq_run_yield(curr_cpu_runq());
94 }
95 
96 static ALWAYS_INLINE struct k_thread *runq_best(void)
97 {
98 	return _priq_run_best(curr_cpu_runq());
99 }
100 
101 /* _current is never in the run queue until context switch on
102  * SMP configurations, see z_requeue_current()
103  */
104 static inline bool should_queue_thread(struct k_thread *thread)
105 {
106 	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
107 }
108 
109 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
110 {
111 	z_mark_thread_as_queued(thread);
112 	if (should_queue_thread(thread)) {
113 		runq_add(thread);
114 	}
115 #ifdef CONFIG_SMP
116 	if (thread == _current) {
117 		/* add current to end of queue means "yield" */
118 		_current_cpu->swap_ok = true;
119 	}
120 #endif /* CONFIG_SMP */
121 }
122 
123 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
124 {
125 	z_mark_thread_as_not_queued(thread);
126 	if (should_queue_thread(thread)) {
127 		runq_remove(thread);
128 	}
129 }
130 
131 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
132  * never live in the run queue until we are inexorably on the context
133  * switch path on SMP, otherwise there is a deadlock condition where a
134  * set of CPUs pick a cycle of threads to run and wait for them all to
135  * context switch forever.
136  */
137 void z_requeue_current(struct k_thread *thread)
138 {
139 	if (z_is_thread_queued(thread)) {
140 		runq_add(thread);
141 	}
142 	signal_pending_ipi();
143 }
144 
145 /* Return true if the thread is aborting, else false */
146 static inline bool is_aborting(struct k_thread *thread)
147 {
148 	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
149 }
150 
151 /* Return true if the thread is aborting or suspending, else false */
152 static inline bool is_halting(struct k_thread *thread)
153 {
154 	return (thread->base.thread_state &
155 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
156 }
157 
158 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
159 static inline void clear_halting(struct k_thread *thread)
160 {
161 	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
162 		barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
163 		thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
164 	}
165 }
166 
167 static ALWAYS_INLINE struct k_thread *next_up(void)
168 {
169 #ifdef CONFIG_SMP
170 	if (is_halting(_current)) {
171 		halt_thread(_current, is_aborting(_current) ?
172 				      _THREAD_DEAD : _THREAD_SUSPENDED);
173 	}
174 #endif /* CONFIG_SMP */
175 
176 	struct k_thread *thread = runq_best();
177 
178 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
179 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
180 	/* MetaIRQs must always attempt to return back to a
181 	 * cooperative thread they preempted and not whatever happens
182 	 * to be highest priority now. The cooperative thread was
183 	 * promised it wouldn't be preempted (by non-metairq threads)!
184 	 */
185 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
186 
187 	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
188 		if (!z_is_thread_prevented_from_running(mirqp)) {
189 			thread = mirqp;
190 		} else {
191 			_current_cpu->metairq_preempted = NULL;
192 		}
193 	}
194 #endif
195 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
196  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
197  */
198 
199 #ifndef CONFIG_SMP
200 	/* In uniprocessor mode, we can leave the current thread in
201 	 * the queue (actually we have to, otherwise the assembly
202 	 * context switch code for all architectures would be
203 	 * responsible for putting it back in z_swap and ISR return!),
204 	 * which makes this choice simple.
205 	 */
206 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
207 #else
208 	/* Under SMP, the "cache" mechanism for selecting the next
209 	 * thread doesn't work, so we have more work to do to test
210 	 * _current against the best choice from the queue.  Here, the
211 	 * thread selected above represents "the best thread that is
212 	 * not current".
213 	 *
214 	 * Subtle note on "queued": in SMP mode, _current does not
215 	 * live in the queue, so this isn't exactly the same thing as
216 	 * "ready", it means "is _current already added back to the
217 	 * queue such that we don't want to re-add it".
218 	 */
219 	bool queued = z_is_thread_queued(_current);
220 	bool active = !z_is_thread_prevented_from_running(_current);
221 
222 	if (thread == NULL) {
223 		thread = _current_cpu->idle_thread;
224 	}
225 
226 	if (active) {
227 		int32_t cmp = z_sched_prio_cmp(_current, thread);
228 
229 		/* Ties only switch if state says we yielded */
230 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
231 			thread = _current;
232 		}
233 
234 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
235 			thread = _current;
236 		}
237 	}
238 
239 	/* Put _current back into the queue */
240 	if ((thread != _current) && active &&
241 		!z_is_idle_thread_object(_current) && !queued) {
242 		queue_thread(_current);
243 	}
244 
245 	/* Take the new _current out of the queue */
246 	if (z_is_thread_queued(thread)) {
247 		dequeue_thread(thread);
248 	}
249 
250 	_current_cpu->swap_ok = false;
251 	return thread;
252 #endif /* CONFIG_SMP */
253 }
254 
255 void move_thread_to_end_of_prio_q(struct k_thread *thread)
256 {
257 	if (z_is_thread_queued(thread)) {
258 		dequeue_thread(thread);
259 	}
260 	queue_thread(thread);
261 	update_cache(thread == _current);
262 }
263 
264 /* Track cooperative threads preempted by metairqs so we can return to
265  * them specifically.  Called at the moment a new thread has been
266  * selected to run.
267  */
268 static void update_metairq_preempt(struct k_thread *thread)
269 {
270 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
271 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
272 	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
273 	    !thread_is_preemptible(_current)) {
274 		/* Record new preemption */
275 		_current_cpu->metairq_preempted = _current;
276 	} else if (!thread_is_metairq(thread)) {
277 		/* Returning from existing preemption */
278 		_current_cpu->metairq_preempted = NULL;
279 	}
280 #else
281 	ARG_UNUSED(thread);
282 #endif
283 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
284  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
285  */
286 }
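/* Illustrative sketch (not part of this file) of the MetaIRQ bookkeeping that
 * next_up() and update_metairq_preempt() above implement: with
 * CONFIG_NUM_METAIRQ_PRIORITIES=1, a thread created at the highest cooperative
 * priority is treated as a MetaIRQ and may preempt cooperative threads, which
 * the scheduler returns to once the MetaIRQ pends or exits.  The stack, thread
 * and entry names are hypothetical, and K_HIGHEST_THREAD_PRIO is assumed to be
 * the top of the MetaIRQ band under that Kconfig setting.
 *
 *   K_THREAD_STACK_DEFINE(metairq_stack, 1024);
 *   struct k_thread metairq_thread;
 *
 *   k_thread_create(&metairq_thread, metairq_stack,
 *                   K_THREAD_STACK_SIZEOF(metairq_stack),
 *                   metairq_entry, NULL, NULL, NULL,
 *                   K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
 */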
287 
288 static ALWAYS_INLINE void update_cache(int preempt_ok)
289 {
290 #ifndef CONFIG_SMP
291 	struct k_thread *thread = next_up();
292 
293 	if (should_preempt(thread, preempt_ok)) {
294 #ifdef CONFIG_TIMESLICING
295 		if (thread != _current) {
296 			z_reset_time_slice(thread);
297 		}
298 #endif /* CONFIG_TIMESLICING */
299 		update_metairq_preempt(thread);
300 		_kernel.ready_q.cache = thread;
301 	} else {
302 		_kernel.ready_q.cache = _current;
303 	}
304 
305 #else
306 	/* The way this works is that the CPU record keeps its
307 	 * "cooperative swapping is OK" flag until the next reschedule
308 	 * call or context switch.  It doesn't need to be tracked per
309 	 * thread because if the thread gets preempted for whatever
310 	 * reason the scheduler will make the same decision anyway.
311 	 */
312 	_current_cpu->swap_ok = preempt_ok;
313 #endif /* CONFIG_SMP */
314 }
315 
316 static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
317 {
318 	/* Returns pointer to _cpu if the thread is currently running on
319 	 * another CPU. There are more scalable designs to answer this
320 	 * question in constant time, but this is fine for now.
321 	 */
322 #ifdef CONFIG_SMP
323 	int currcpu = _current_cpu->id;
324 
325 	unsigned int num_cpus = arch_num_cpus();
326 
327 	for (int i = 0; i < num_cpus; i++) {
328 		if ((i != currcpu) &&
329 		    (_kernel.cpus[i].current == thread)) {
330 			return &_kernel.cpus[i];
331 		}
332 	}
333 #endif /* CONFIG_SMP */
334 	ARG_UNUSED(thread);
335 	return NULL;
336 }
337 
338 static void ready_thread(struct k_thread *thread)
339 {
340 #ifdef CONFIG_KERNEL_COHERENCE
341 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
342 #endif /* CONFIG_KERNEL_COHERENCE */
343 
344 	/* If the thread is already queued, do not try to add it to the
345 	 * run queue again
346 	 */
347 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
348 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
349 
350 		queue_thread(thread);
351 		update_cache(0);
352 
353 		flag_ipi(ipi_mask_create(thread));
354 	}
355 }
356 
357 void z_ready_thread(struct k_thread *thread)
358 {
359 	K_SPINLOCK(&_sched_spinlock) {
360 		if (thread_active_elsewhere(thread) == NULL) {
361 			ready_thread(thread);
362 		}
363 	}
364 }
365 
366 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
367 {
368 	K_SPINLOCK(&_sched_spinlock) {
369 		move_thread_to_end_of_prio_q(thread);
370 	}
371 }
372 
373 /* Spins in ISR context, waiting for a thread known to be running on
374  * another CPU to catch the IPI we sent and halt.  Note that we check
375  * for ourselves being asynchronously halted first to prevent simple
376  * deadlocks (but not complex ones involving cycles of 3+ threads!).
377  * Acts to release the provided lock before returning.
378  */
379 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
380 {
381 	if (is_halting(_current)) {
382 		halt_thread(_current,
383 			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
384 	}
385 	k_spin_unlock(&_sched_spinlock, key);
386 	while (is_halting(thread)) {
387 		unsigned int k = arch_irq_lock();
388 
389 		arch_spin_relax(); /* Requires interrupts be masked */
390 		arch_irq_unlock(k);
391 	}
392 }
393 
394 /* Shared handler for k_thread_{suspend,abort}().  Called with the
395  * scheduler lock held and its key passed in (which this code may
396  * release and reacquire!).  The lock will be released before a possible
397  * return (aborting _current will not return, obviously), which may be
398  * after a context switch.
399  */
400 static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
401 					bool terminate)
402 {
403 	_wait_q_t *wq = &thread->join_queue;
404 #ifdef CONFIG_SMP
405 	wq = terminate ? wq : &thread->halt_queue;
406 #endif
407 
408 	/* If the target is a thread running on another CPU, flag it and
409 	 * poke it with an IPI (note that we might spin to wait, so a true
410 	 * synchronous IPI is needed here, not a deferred one!); it will
411 	 * halt itself in the IPI handler.  Otherwise it's unscheduled, so
412 	 * we can clean it up directly.
413 	 */
414 
415 	struct _cpu *cpu = thread_active_elsewhere(thread);
416 
417 	if (cpu != NULL) {
418 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
419 					      : _THREAD_SUSPENDING);
420 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
421 #ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
422 		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
423 #else
424 		arch_sched_broadcast_ipi();
425 #endif
426 #endif
427 		if (arch_is_in_isr()) {
428 			thread_halt_spin(thread, key);
429 		} else  {
430 			add_to_waitq_locked(_current, wq);
431 			z_swap(&_sched_spinlock, key);
432 		}
433 	} else {
434 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
435 		if ((thread == _current) && !arch_is_in_isr()) {
436 			if (z_is_thread_essential(thread)) {
437 				k_spin_unlock(&_sched_spinlock, key);
438 				k_panic();
439 				key = k_spin_lock(&_sched_spinlock);
440 			}
441 			z_swap(&_sched_spinlock, key);
442 			__ASSERT(!terminate, "aborted _current back from dead");
443 		} else {
444 			k_spin_unlock(&_sched_spinlock, key);
445 		}
446 	}
447 	/* NOTE: the scheduler lock has been released.  Don't put
448 	 * logic here, it's likely to be racy/deadlocky even if you
449 	 * re-take the lock!
450 	 */
451 }
452 
453 
454 void z_impl_k_thread_suspend(k_tid_t thread)
455 {
456 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
457 
458 	/* Special case "suspend the current thread" as it doesn't
459 	 * need the async complexity below.
460 	 */
461 	if (!IS_ENABLED(CONFIG_SMP) && (thread == _current) && !arch_is_in_isr()) {
462 		k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
463 
464 		z_mark_thread_as_suspended(thread);
465 		dequeue_thread(thread);
466 		update_cache(1);
467 		z_swap(&_sched_spinlock, key);
468 		return;
469 	}
470 
471 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
472 
473 	if (unlikely(z_is_thread_suspended(thread))) {
474 
475 		/* The target thread is already suspended. Nothing to do. */
476 
477 		k_spin_unlock(&_sched_spinlock, key);
478 		return;
479 	}
480 
481 	z_thread_halt(thread, key, false);
482 
483 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
484 }
485 
486 #ifdef CONFIG_USERSPACE
487 static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
488 {
489 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
490 	z_impl_k_thread_suspend(thread);
491 }
492 #include <zephyr/syscalls/k_thread_suspend_mrsh.c>
493 #endif /* CONFIG_USERSPACE */
494 
495 void z_impl_k_thread_resume(k_tid_t thread)
496 {
497 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
498 
499 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
500 
501 	/* Do not try to resume a thread that was not suspended */
502 	if (unlikely(!z_is_thread_suspended(thread))) {
503 		k_spin_unlock(&_sched_spinlock, key);
504 		return;
505 	}
506 
507 	z_mark_thread_as_not_suspended(thread);
508 	ready_thread(thread);
509 
510 	z_reschedule(&_sched_spinlock, key);
511 
512 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
513 }
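/* Usage sketch for the suspend/resume pair implemented above (illustrative,
 * not part of this file; "worker_tid" is a hypothetical thread ID):
 *
 *   k_thread_suspend(worker_tid);   // target stops being schedulable,
 *                                   // even if it runs on another CPU
 *   ...
 *   k_thread_resume(worker_tid);    // made ready again; a no-op if the
 *                                   // target was not actually suspended
 */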
514 
515 #ifdef CONFIG_USERSPACE
516 static inline void z_vrfy_k_thread_resume(k_tid_t thread)
517 {
518 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
519 	z_impl_k_thread_resume(thread);
520 }
521 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
522 #endif /* CONFIG_USERSPACE */
523 
524 static void unready_thread(struct k_thread *thread)
525 {
526 	if (z_is_thread_queued(thread)) {
527 		dequeue_thread(thread);
528 	}
529 	update_cache(thread == _current);
530 }
531 
532 /* _sched_spinlock must be held */
533 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
534 {
535 	unready_thread(thread);
536 	z_mark_thread_as_pending(thread);
537 
538 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
539 
540 	if (wait_q != NULL) {
541 		thread->base.pended_on = wait_q;
542 		_priq_wait_add(&wait_q->waitq, thread);
543 	}
544 }
545 
546 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
547 {
548 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
549 		z_add_thread_timeout(thread, timeout);
550 	}
551 }
552 
553 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
554 			k_timeout_t timeout)
555 {
556 #ifdef CONFIG_KERNEL_COHERENCE
557 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
558 #endif /* CONFIG_KERNEL_COHERENCE */
559 	add_to_waitq_locked(thread, wait_q);
560 	add_thread_timeout(thread, timeout);
561 }
562 
563 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
564 		   k_timeout_t timeout)
565 {
566 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
567 	K_SPINLOCK(&_sched_spinlock) {
568 		pend_locked(thread, wait_q, timeout);
569 	}
570 }
571 
572 void z_unpend_thread_no_timeout(struct k_thread *thread)
573 {
574 	K_SPINLOCK(&_sched_spinlock) {
575 		if (thread->base.pended_on != NULL) {
576 			unpend_thread_no_timeout(thread);
577 		}
578 	}
579 }
580 
581 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
582 {
583 	K_SPINLOCK(&_sched_spinlock) {
584 		bool killed = (thread->base.thread_state &
585 				(_THREAD_DEAD | _THREAD_ABORTING));
586 
587 #ifdef CONFIG_EVENTS
588 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
589 
590 		thread->no_wake_on_timeout = false;
591 
592 		if (do_nothing) {
593 			continue;
594 		}
595 #endif /* CONFIG_EVENTS */
596 
597 		if (!killed) {
598 			/* The thread is not being killed */
599 			if (thread->base.pended_on != NULL) {
600 				unpend_thread_no_timeout(thread);
601 			}
602 			z_mark_thread_as_not_sleeping(thread);
603 			ready_thread(thread);
604 		}
605 	}
606 
607 }
608 
609 #ifdef CONFIG_SYS_CLOCK_EXISTS
610 /* Timeout handler for *_thread_timeout() APIs */
611 void z_thread_timeout(struct _timeout *timeout)
612 {
613 	struct k_thread *thread = CONTAINER_OF(timeout,
614 					       struct k_thread, base.timeout);
615 
616 	z_sched_wake_thread(thread, true);
617 }
618 #endif /* CONFIG_SYS_CLOCK_EXISTS */
619 
620 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
621 	       _wait_q_t *wait_q, k_timeout_t timeout)
622 {
623 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
624 	pending_current = _current;
625 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
626 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
627 
628 	/* We do a "lock swap" prior to calling z_swap(), such that
629 	 * the caller's lock gets released as desired.  But we ensure
630 	 * that we hold the scheduler lock and leave local interrupts
631 	 * masked until we reach the context switch.  z_swap() itself
632 	 * has similar code; the duplication is because it's a legacy
633 	 * API that doesn't expect to be called with scheduler lock
634 	 * held.
635 	 */
636 	(void) k_spin_lock(&_sched_spinlock);
637 	pend_locked(_current, wait_q, timeout);
638 	k_spin_release(lock);
639 	return z_swap(&_sched_spinlock, key);
640 }
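/* Call-pattern sketch for the "lock swap" described above (illustrative, not
 * part of this file): a wait-style primitive takes its own spinlock, then
 * hands that lock plus its key to z_pend_curr(), which pends _current,
 * releases the caller's lock, and swaps away while holding the scheduler
 * lock.  The my_* names and fields here are hypothetical.
 *
 *   static struct k_spinlock my_lock;
 *
 *   int my_wait(struct my_obj *obj, k_timeout_t timeout)
 *   {
 *           k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *           if (obj->ready) {
 *                   k_spin_unlock(&my_lock, key);
 *                   return 0;
 *           }
 *           return z_pend_curr(&my_lock, key, &obj->wait_q, timeout);
 *   }
 */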
641 
642 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
643 {
644 	struct k_thread *thread = NULL;
645 
646 	K_SPINLOCK(&_sched_spinlock) {
647 		thread = _priq_wait_best(&wait_q->waitq);
648 
649 		if (thread != NULL) {
650 			unpend_thread_no_timeout(thread);
651 		}
652 	}
653 
654 	return thread;
655 }
656 
657 void z_unpend_thread(struct k_thread *thread)
658 {
659 	z_unpend_thread_no_timeout(thread);
660 	z_abort_thread_timeout(thread);
661 }
662 
663 /* Priority set utility that does no rescheduling, it just changes the
664  * run queue state, returning true if a reschedule is needed later.
665  */
666 bool z_thread_prio_set(struct k_thread *thread, int prio)
667 {
668 	bool need_sched = false;
669 	int old_prio = thread->base.prio;
670 
671 	K_SPINLOCK(&_sched_spinlock) {
672 		need_sched = z_is_thread_ready(thread);
673 
674 		if (need_sched) {
675 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
676 				dequeue_thread(thread);
677 				thread->base.prio = prio;
678 				queue_thread(thread);
679 
680 				if (old_prio > prio) {
681 					flag_ipi(ipi_mask_create(thread));
682 				}
683 			} else {
684 				/*
685 				 * This is a running thread on SMP. Update its
686 				 * priority, but do not requeue it. An IPI is
687 				 * needed if the priority is both being lowered
688 				 * and it is running on another CPU.
689 				 */
690 
691 				thread->base.prio = prio;
692 
693 				struct _cpu *cpu;
694 
695 				cpu = thread_active_elsewhere(thread);
696 				if ((cpu != NULL) && (old_prio < prio)) {
697 					flag_ipi(IPI_CPU_MASK(cpu->id));
698 				}
699 			}
700 
701 			update_cache(1);
702 		} else {
703 			thread->base.prio = prio;
704 		}
705 	}
706 
707 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
708 
709 	return need_sched;
710 }
711 
712 static inline bool resched(uint32_t key)
713 {
714 #ifdef CONFIG_SMP
715 	_current_cpu->swap_ok = 0;
716 #endif /* CONFIG_SMP */
717 
718 	return arch_irq_unlocked(key) && !arch_is_in_isr();
719 }
720 
721 /*
722  * Check if the next ready thread is the same as the current thread
723  * and save the trip if true.
724  */
725 static inline bool need_swap(void)
726 {
727 	/* the SMP case will be handled in C based z_swap() */
728 #ifdef CONFIG_SMP
729 	return true;
730 #else
731 	struct k_thread *new_thread;
732 
733 	/* Check if the next ready thread is the same as the current thread */
734 	new_thread = _kernel.ready_q.cache;
735 	return new_thread != _current;
736 #endif /* CONFIG_SMP */
737 }
738 
739 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
740 {
741 	if (resched(key.key) && need_swap()) {
742 		z_swap(lock, key);
743 	} else {
744 		k_spin_unlock(lock, key);
745 		signal_pending_ipi();
746 	}
747 }
748 
749 void z_reschedule_irqlock(uint32_t key)
750 {
751 	if (resched(key) && need_swap()) {
752 		z_swap_irqlock(key);
753 	} else {
754 		irq_unlock(key);
755 		signal_pending_ipi();
756 	}
757 }
758 
759 void k_sched_lock(void)
760 {
761 	K_SPINLOCK(&_sched_spinlock) {
762 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
763 
764 		z_sched_lock();
765 	}
766 }
767 
768 void k_sched_unlock(void)
769 {
770 	K_SPINLOCK(&_sched_spinlock) {
771 		__ASSERT(_current->base.sched_locked != 0U, "");
772 		__ASSERT(!arch_is_in_isr(), "");
773 
774 		++_current->base.sched_locked;
775 		update_cache(0);
776 	}
777 
778 	LOG_DBG("scheduler unlocked (%p:%d)",
779 		_current, _current->base.sched_locked);
780 
781 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
782 
783 	z_reschedule_unlocked();
784 }
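/* Usage sketch for k_sched_lock()/k_sched_unlock() above (illustrative, not
 * part of this file; update_shared_state() is hypothetical): the pair makes
 * the current thread non-preemptible by other threads (interrupts still run)
 * for a short critical section.
 *
 *   k_sched_lock();
 *   update_shared_state();
 *   k_sched_unlock();        // may reschedule here if something woke up
 */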
785 
786 struct k_thread *z_swap_next_thread(void)
787 {
788 #ifdef CONFIG_SMP
789 	struct k_thread *ret = next_up();
790 
791 	if (ret == _current) {
792 		/* When not swapping, have to signal IPIs here.  In
793 		 * the context switch case it must happen later, after
794 		 * _current gets requeued.
795 		 */
796 		signal_pending_ipi();
797 	}
798 	return ret;
799 #else
800 	return _kernel.ready_q.cache;
801 #endif /* CONFIG_SMP */
802 }
803 
804 #ifdef CONFIG_USE_SWITCH
805 /* Just a wrapper around z_current_thread_set(xxx) with tracing */
806 static inline void set_current(struct k_thread *new_thread)
807 {
808 	z_thread_mark_switched_out();
809 	z_current_thread_set(new_thread);
810 }
811 
812 /**
813  * @brief Determine next thread to execute upon completion of an interrupt
814  *
815  * Thread preemption is performed by context switching after the completion
816  * of a non-recursed interrupt. This function determines which thread to
817  * switch to if any. This function accepts as @p interrupted either:
818  *
819  * - The handle for the interrupted thread in which case the thread's context
820  *   must already be fully saved and ready to be picked up by a different CPU.
821  *
822  * - NULL if more work is required to fully save the thread's state after
823  *   it is known that a new thread is to be scheduled. It is up to the caller
824  *   to store the handle resulting from the thread that is being switched out
825  *   in that thread's "switch_handle" field after its
826  *   context has fully been saved, following the same requirements as with
827  *   the @ref arch_switch() function.
828  *
829  * If a new thread needs to be scheduled then its handle is returned.
830  * Otherwise the same value provided as @p interrupted is returned back.
831  * Those handles are the same opaque types used by the @ref arch_switch()
832  * function.
833  *
834  * @warning
835  * The _current value may have changed after this call and not refer
836  * to the interrupted thread anymore. It might be necessary to make a local
837  * copy before calling this function.
838  *
839  * @param interrupted Handle for the thread that was interrupted or NULL.
840  * @retval Handle for the next thread to execute, or @p interrupted when
841  *         no new thread is to be scheduled.
842  */
843 void *z_get_next_switch_handle(void *interrupted)
844 {
845 	z_check_stack_sentinel();
846 
847 #ifdef CONFIG_SMP
848 	void *ret = NULL;
849 
850 	K_SPINLOCK(&_sched_spinlock) {
851 		struct k_thread *old_thread = _current, *new_thread;
852 
853 		if (IS_ENABLED(CONFIG_SMP)) {
854 			old_thread->switch_handle = NULL;
855 		}
856 		new_thread = next_up();
857 
858 		z_sched_usage_switch(new_thread);
859 
860 		if (old_thread != new_thread) {
861 			uint8_t  cpu_id;
862 
863 			update_metairq_preempt(new_thread);
864 			z_sched_switch_spin(new_thread);
865 			arch_cohere_stacks(old_thread, interrupted, new_thread);
866 
867 			_current_cpu->swap_ok = 0;
868 			cpu_id = arch_curr_cpu()->id;
869 			new_thread->base.cpu = cpu_id;
870 			set_current(new_thread);
871 
872 #ifdef CONFIG_TIMESLICING
873 			z_reset_time_slice(new_thread);
874 #endif /* CONFIG_TIMESLICING */
875 
876 #ifdef CONFIG_SPIN_VALIDATE
877 			/* Changed _current!  Update the spinlock
878 			 * bookkeeping so the validation doesn't get
879 			 * confused when the "wrong" thread tries to
880 			 * release the lock.
881 			 */
882 			z_spin_lock_set_owner(&_sched_spinlock);
883 #endif /* CONFIG_SPIN_VALIDATE */
884 
885 			/* A queued (runnable) old/current thread
886 			 * needs to be added back to the run queue
887 			 * here, and atomically with its switch handle
888 			 * being set below.  This is safe now, as we
889 			 * will not return into it.
890 			 */
891 			if (z_is_thread_queued(old_thread)) {
892 #ifdef CONFIG_SCHED_IPI_CASCADE
893 				if ((new_thread->base.cpu_mask != -1) &&
894 				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
895 					flag_ipi(ipi_mask_create(old_thread));
896 				}
897 #endif
898 				runq_add(old_thread);
899 			}
900 		}
901 		old_thread->switch_handle = interrupted;
902 		ret = new_thread->switch_handle;
903 		if (IS_ENABLED(CONFIG_SMP)) {
904 			/* Active threads MUST have a null here */
905 			new_thread->switch_handle = NULL;
906 		}
907 	}
908 	signal_pending_ipi();
909 	return ret;
910 #else
911 	z_sched_usage_switch(_kernel.ready_q.cache);
912 	_current->switch_handle = interrupted;
913 	set_current(_kernel.ready_q.cache);
914 	return _current->switch_handle;
915 #endif /* CONFIG_SMP */
916 }
917 #endif /* CONFIG_USE_SWITCH */
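/* Integration sketch for z_get_next_switch_handle() above (illustrative only;
 * real usage lives in per-architecture interrupt-exit code, and the my_arch_*
 * helpers below are hypothetical):
 *
 *   void *old = my_arch_saved_switch_handle();
 *   void *next = z_get_next_switch_handle(old);
 *
 *   if (next != old) {
 *           my_arch_restore_context(next);   // resume the newly chosen thread
 *   }
 */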
918 
919 int z_unpend_all(_wait_q_t *wait_q)
920 {
921 	int need_sched = 0;
922 	struct k_thread *thread;
923 
924 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
925 		z_unpend_thread(thread);
926 		z_ready_thread(thread);
927 		need_sched = 1;
928 	}
929 
930 	return need_sched;
931 }
932 
933 void init_ready_q(struct _ready_q *ready_q)
934 {
935 	_priq_run_init(&ready_q->runq);
936 }
937 
938 void z_sched_init(void)
939 {
940 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
941 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
942 		init_ready_q(&_kernel.cpus[i].ready_q);
943 	}
944 #else
945 	init_ready_q(&_kernel.ready_q);
946 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
947 }
948 
949 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
950 {
951 	/*
952 	 * Use NULL, since we cannot know what the entry point is (we do not
953 	 * keep track of it) and idle cannot change its priority.
954 	 */
955 	Z_ASSERT_VALID_PRIO(prio, NULL);
956 
957 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
958 
959 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
960 			     (_current->base.sched_locked == 0U))) {
961 		z_reschedule_unlocked();
962 	}
963 }
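/* Usage sketch for the priority path above (illustrative, not part of this
 * file; "worker_tid" is hypothetical): changing a thread's priority may
 * trigger an immediate reschedule and, on SMP, IPIs so other CPUs re-evaluate
 * what they are running.
 *
 *   k_thread_priority_set(worker_tid, K_PRIO_PREEMPT(2));
 */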
964 
965 #ifdef CONFIG_USERSPACE
966 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
967 {
968 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
969 	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
970 				    "invalid thread priority %d", prio));
971 #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
972 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
973 				    "thread priority may only be downgraded (%d < %d)",
974 				    prio, thread->base.prio));
975 #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
976 	z_impl_k_thread_priority_set(thread, prio);
977 }
978 #include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
979 #endif /* CONFIG_USERSPACE */
980 
981 #ifdef CONFIG_SCHED_DEADLINE
982 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
983 {
984 
985 	deadline = CLAMP(deadline, 0, INT_MAX);
986 
987 	struct k_thread *thread = tid;
988 	int32_t newdl = k_cycle_get_32() + deadline;
989 
990 	/* The prio_deadline field changes the sorting order, so we can't
991 	 * change it while the thread is in the run queue (dlists
992 	 * actually are benign as long as we requeue it before we
993 	 * release the lock, but an rbtree will blow up if we break
994 	 * sorting!)
995 	 */
996 	K_SPINLOCK(&_sched_spinlock) {
997 		if (z_is_thread_queued(thread)) {
998 			dequeue_thread(thread);
999 			thread->base.prio_deadline = newdl;
1000 			queue_thread(thread);
1001 		} else {
1002 			thread->base.prio_deadline = newdl;
1003 		}
1004 	}
1005 }
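/* Usage sketch for CONFIG_SCHED_DEADLINE above (illustrative, not part of
 * this file; "worker_tid" is hypothetical): the deadline is expressed in
 * hardware cycles relative to now, so a time budget is typically converted
 * first (the k_ms_to_cyc_ceil32() conversion helper is assumed here).
 *
 *   k_thread_deadline_set(worker_tid, (int)k_ms_to_cyc_ceil32(5));
 */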
1006 
1007 #ifdef CONFIG_USERSPACE
1008 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1009 {
1010 	struct k_thread *thread = tid;
1011 
1012 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1013 	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1014 				    "invalid thread deadline %d",
1015 				    (int)deadline));
1016 
1017 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1018 }
1019 #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
1020 #endif /* CONFIG_USERSPACE */
1021 #endif /* CONFIG_SCHED_DEADLINE */
1022 
1023 void z_impl_k_reschedule(void)
1024 {
1025 	k_spinlock_key_t key;
1026 
1027 	key = k_spin_lock(&_sched_spinlock);
1028 
1029 	update_cache(0);
1030 
1031 	z_reschedule(&_sched_spinlock, key);
1032 }
1033 
1034 #ifdef CONFIG_USERSPACE
1035 static inline void z_vrfy_k_reschedule(void)
1036 {
1037 	z_impl_k_reschedule();
1038 }
1039 #include <zephyr/syscalls/k_reschedule_mrsh.c>
1040 #endif /* CONFIG_USERSPACE */
1041 
1042 bool k_can_yield(void)
1043 {
1044 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1045 		 z_is_idle_thread_object(_current));
1046 }
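/* Usage sketch for k_can_yield() above (illustrative, not part of this file):
 * code that might run from an ISR, during pre-kernel init, or in the idle
 * thread can guard its cooperative yield points.
 *
 *   if (k_can_yield()) {
 *           k_yield();
 *   }
 */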
1047 
1048 void z_impl_k_yield(void)
1049 {
1050 	__ASSERT(!arch_is_in_isr(), "");
1051 
1052 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1053 
1054 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1055 
1056 	runq_yield();
1057 
1058 	update_cache(1);
1059 	z_swap(&_sched_spinlock, key);
1060 }
1061 
1062 #ifdef CONFIG_USERSPACE
1063 static inline void z_vrfy_k_yield(void)
1064 {
1065 	z_impl_k_yield();
1066 }
1067 #include <zephyr/syscalls/k_yield_mrsh.c>
1068 #endif /* CONFIG_USERSPACE */
1069 
1070 static int32_t z_tick_sleep(k_timeout_t timeout)
1071 {
1072 	uint32_t expected_wakeup_ticks;
1073 
1074 	__ASSERT(!arch_is_in_isr(), "");
1075 
1076 	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)timeout.ticks);
1077 
1078 	/* K_NO_WAIT is treated as a 'yield' */
1079 	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1080 		k_yield();
1081 		return 0;
1082 	}
1083 
1084 	if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
1085 		expected_wakeup_ticks = timeout.ticks + sys_clock_tick_get_32();
1086 	} else {
1087 		expected_wakeup_ticks = Z_TICK_ABS(timeout.ticks);
1088 	}
1089 
1090 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1091 
1092 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1093 	pending_current = _current;
1094 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
1095 	unready_thread(_current);
1096 	z_add_thread_timeout(_current, timeout);
1097 	z_mark_thread_as_sleeping(_current);
1098 
1099 	(void)z_swap(&_sched_spinlock, key);
1100 
1101 	/* Use a 32 bit unsigned subtraction so that tick counter wraparound is handled correctly */
1102 	uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32();
1103 
1104 	/* To handle a negative value correctly, cast it to signed 32 bit first */
1105 	k_ticks_t ticks = (k_ticks_t)(int32_t)left_ticks;
1106 
1107 	if (ticks > 0) {
1108 		return ticks;
1109 	}
1110 
1111 	return 0;
1112 }
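/* Worked example of the wraparound math above (illustrative): with the 32-bit
 * tick counter at 0xFFFFFFF0 and a relative timeout of 0x20 ticks,
 * expected_wakeup_ticks wraps to 0x10.  Waking early at tick 0x05 gives
 * left_ticks = 0x10 - 0x05 = 11, which is returned; waking late at tick 0x15
 * gives 0x10 - 0x15 = 0xFFFFFFFB, i.e. -5 after the signed cast, so the
 * function returns 0.
 */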
1113 
1114 int32_t z_impl_k_sleep(k_timeout_t timeout)
1115 {
1116 	k_ticks_t ticks;
1117 
1118 	__ASSERT(!arch_is_in_isr(), "");
1119 
1120 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1121 
1122 	ticks = z_tick_sleep(timeout);
1123 
1124 	/* k_sleep() still returns 32 bit milliseconds for compatibility */
1125 	int64_t ms = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
1126 		CLAMP(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
1127 
1128 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ms);
1129 	return (int32_t) ms;
1130 }
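/* Usage sketch for the sleep/wakeup pair (illustrative, not part of this
 * file): a sleeping thread woken early by k_wakeup() sees the remaining time,
 * in milliseconds, as the k_sleep() return value.
 *
 *   int32_t remaining_ms = k_sleep(K_MSEC(100));
 *
 *   if (remaining_ms > 0) {
 *           // another thread called k_wakeup() on us before 100 ms elapsed
 *   }
 */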
1131 
1132 #ifdef CONFIG_USERSPACE
1133 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1134 {
1135 	return z_impl_k_sleep(timeout);
1136 }
1137 #include <zephyr/syscalls/k_sleep_mrsh.c>
1138 #endif /* CONFIG_USERSPACE */
1139 
1140 int32_t z_impl_k_usleep(int32_t us)
1141 {
1142 	int32_t ticks;
1143 
1144 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1145 
1146 	ticks = k_us_to_ticks_ceil64(us);
1147 	ticks = z_tick_sleep(Z_TIMEOUT_TICKS(ticks));
1148 
1149 	int32_t ret = k_ticks_to_us_ceil64(ticks);
1150 
1151 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1152 
1153 	return ret;
1154 }
1155 
1156 #ifdef CONFIG_USERSPACE
1157 static inline int32_t z_vrfy_k_usleep(int32_t us)
1158 {
1159 	return z_impl_k_usleep(us);
1160 }
1161 #include <zephyr/syscalls/k_usleep_mrsh.c>
1162 #endif /* CONFIG_USERSPACE */
1163 
1164 void z_impl_k_wakeup(k_tid_t thread)
1165 {
1166 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1167 
1168 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
1169 
1170 	if (z_is_thread_sleeping(thread)) {
1171 		z_abort_thread_timeout(thread);
1172 		z_mark_thread_as_not_sleeping(thread);
1173 		ready_thread(thread);
1174 		z_reschedule(&_sched_spinlock, key);
1175 	} else {
1176 		k_spin_unlock(&_sched_spinlock, key);
1177 	}
1178 }
1179 
1180 #ifdef CONFIG_USERSPACE
1181 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1182 {
1183 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1184 	z_impl_k_wakeup(thread);
1185 }
1186 #include <zephyr/syscalls/k_wakeup_mrsh.c>
1187 #endif /* CONFIG_USERSPACE */
1188 
1189 k_tid_t z_impl_k_sched_current_thread_query(void)
1190 {
1191 	return _current;
1192 }
1193 
1194 #ifdef CONFIG_USERSPACE
1195 static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1196 {
1197 	return z_impl_k_sched_current_thread_query();
1198 }
1199 #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
1200 #endif /* CONFIG_USERSPACE */
1201 
1202 static inline void unpend_all(_wait_q_t *wait_q)
1203 {
1204 	struct k_thread *thread;
1205 
1206 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
1207 		unpend_thread_no_timeout(thread);
1208 		z_abort_thread_timeout(thread);
1209 		arch_thread_return_value_set(thread, 0);
1210 		ready_thread(thread);
1211 	}
1212 }
1213 
1214 #ifdef CONFIG_THREAD_ABORT_HOOK
1215 extern void thread_abort_hook(struct k_thread *thread);
1216 #endif /* CONFIG_THREAD_ABORT_HOOK */
1217 
1218 /**
1219  * @brief Dequeues the specified thread
1220  *
1221  * Dequeues the specified thread and moves it into the specified new state.
1222  *
1223  * @param thread Identify the thread to halt
1224  * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1225  */
1226 static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state)
1227 {
1228 	bool dummify = false;
1229 
1230 	/* We hold the lock, and the thread is known not to be running
1231 	 * anywhere.
1232 	 */
1233 	if ((thread->base.thread_state & new_state) == 0U) {
1234 		thread->base.thread_state |= new_state;
1235 		if (z_is_thread_queued(thread)) {
1236 			dequeue_thread(thread);
1237 		}
1238 
1239 		if (new_state == _THREAD_DEAD) {
1240 			if (thread->base.pended_on != NULL) {
1241 				unpend_thread_no_timeout(thread);
1242 			}
1243 			z_abort_thread_timeout(thread);
1244 			unpend_all(&thread->join_queue);
1245 
1246 			/* Edge case: aborting _current from within an
1247 			 * ISR that preempted it requires clearing the
1248 			 * _current pointer so the upcoming context
1249 			 * switch doesn't clobber the now-freed
1250 			 * memory
1251 			 */
1252 			if (thread == _current && arch_is_in_isr()) {
1253 				dummify = true;
1254 			}
1255 		}
1256 #ifdef CONFIG_SMP
1257 		unpend_all(&thread->halt_queue);
1258 #endif /* CONFIG_SMP */
1259 		update_cache(1);
1260 
1261 		if (new_state == _THREAD_SUSPENDED) {
1262 			clear_halting(thread);
1263 			return;
1264 		}
1265 
1266 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1267 		arch_float_disable(thread);
1268 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
1269 
1270 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1271 
1272 		z_thread_monitor_exit(thread);
1273 #ifdef CONFIG_THREAD_ABORT_HOOK
1274 		thread_abort_hook(thread);
1275 #endif /* CONFIG_THREAD_ABORT_HOOK */
1276 
1277 #ifdef CONFIG_OBJ_CORE_THREAD
1278 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
1279 		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1280 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
1281 		k_obj_core_unlink(K_OBJ_CORE(thread));
1282 #endif /* CONFIG_OBJ_CORE_THREAD */
1283 
1284 #ifdef CONFIG_USERSPACE
1285 		z_mem_domain_exit_thread(thread);
1286 		k_thread_perms_all_clear(thread);
1287 		k_object_uninit(thread->stack_obj);
1288 		k_object_uninit(thread);
1289 #endif /* CONFIG_USERSPACE */
1290 
1291 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1292 		k_thread_abort_cleanup(thread);
1293 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1294 
1295 		/* Do this "set _current to dummy" step last so that
1296 		 * subsystems above can rely on _current being
1297 		 * unchanged.  Disabled for posix as that arch
1298 		 * continues to use the _current pointer in its swap
1299 		 * code.  Note that we must leave a non-null switch
1300 		 * handle for any threads spinning in join() (this can
1301 		 * never be used, as our thread is flagged dead, but
1302 		 * it must not be NULL otherwise join can deadlock).
1303 		 */
1304 		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
1305 #ifdef CONFIG_USE_SWITCH
1306 			_current->switch_handle = _current;
1307 #endif
1308 			z_dummy_thread_init(&_thread_dummy);
1309 
1310 		}
1311 
1312 		/* Finally update the halting thread state, on which
1313 		 * other CPUs might be spinning (see
1314 		 * thread_halt_spin()).
1315 		 */
1316 		clear_halting(thread);
1317 	}
1318 }
1319 
1320 void z_thread_abort(struct k_thread *thread)
1321 {
1322 	bool essential = z_is_thread_essential(thread);
1323 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1324 
1325 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1326 		k_spin_unlock(&_sched_spinlock, key);
1327 		return;
1328 	}
1329 
1330 	z_thread_halt(thread, key, true);
1331 
1332 	if (essential) {
1333 		__ASSERT(!essential, "aborted essential thread %p", thread);
1334 		k_panic();
1335 	}
1336 }
1337 
1338 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1339 void z_impl_k_thread_abort(k_tid_t thread)
1340 {
1341 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1342 
1343 	z_thread_abort(thread);
1344 
1345 	__ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1346 
1347 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1348 }
1349 #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
1350 
1351 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1352 {
1353 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1354 	int ret;
1355 
1356 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1357 
1358 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1359 		z_sched_switch_spin(thread);
1360 		ret = 0;
1361 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1362 		ret = -EBUSY;
1363 	} else if ((thread == _current) ||
1364 		   (thread->base.pended_on == &_current->join_queue)) {
1365 		ret = -EDEADLK;
1366 	} else {
1367 		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1368 		add_to_waitq_locked(_current, &thread->join_queue);
1369 		add_thread_timeout(_current, timeout);
1370 
1371 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1372 		ret = z_swap(&_sched_spinlock, key);
1373 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1374 
1375 		return ret;
1376 	}
1377 
1378 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1379 
1380 	k_spin_unlock(&_sched_spinlock, key);
1381 	return ret;
1382 }
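/* Usage sketch for k_thread_join() above (illustrative, not part of this
 * file; "worker", "worker_stack" and "worker_entry" are hypothetical and
 * assumed to be defined elsewhere): the caller pends on the target's
 * join_queue until the target is marked _THREAD_DEAD or the timeout expires.
 *
 *   k_tid_t tid = k_thread_create(&worker, worker_stack,
 *                                 K_THREAD_STACK_SIZEOF(worker_stack),
 *                                 worker_entry, NULL, NULL, NULL,
 *                                 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
 *
 *   int ret = k_thread_join(tid, K_MSEC(500));
 *   // ret == 0 once the thread exits, -EBUSY for K_NO_WAIT while it still
 *   // runs, -EDEADLK for a self-join or a circular join
 */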
1383 
1384 #ifdef CONFIG_USERSPACE
1385 /* Special case: don't oops if the thread is uninitialized.  This is because
1386  * the initialization bit does double-duty for thread objects; if false, it
1387  * means the thread object is truly uninitialized, or that the thread ran and
1388  * exited for some reason.
1389  *
1390  * Return true in this case indicating we should just do nothing and return
1391  * success to the caller.
1392  */
1393 static bool thread_obj_validate(struct k_thread *thread)
1394 {
1395 	struct k_object *ko = k_object_find(thread);
1396 	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1397 
1398 	switch (ret) {
1399 	case 0:
1400 		return false;
1401 	case -EINVAL:
1402 		return true;
1403 	default:
1404 #ifdef CONFIG_LOG
1405 		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
1406 #endif /* CONFIG_LOG */
1407 		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
1408 	}
1409 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1410 }
1411 
1412 static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1413 				       k_timeout_t timeout)
1414 {
1415 	if (thread_obj_validate(thread)) {
1416 		return 0;
1417 	}
1418 
1419 	return z_impl_k_thread_join(thread, timeout);
1420 }
1421 #include <zephyr/syscalls/k_thread_join_mrsh.c>
1422 
1423 static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1424 {
1425 	if (thread_obj_validate(thread)) {
1426 		return;
1427 	}
1428 
1429 	K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
1430 				    "aborting essential thread %p", thread));
1431 
1432 	z_impl_k_thread_abort((struct k_thread *)thread);
1433 }
1434 #include <zephyr/syscalls/k_thread_abort_mrsh.c>
1435 #endif /* CONFIG_USERSPACE */
1436 
1437 /*
1438  * future scheduler.h API implementations
1439  */
1440 bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1441 {
1442 	struct k_thread *thread;
1443 	bool ret = false;
1444 
1445 	K_SPINLOCK(&_sched_spinlock) {
1446 		thread = _priq_wait_best(&wait_q->waitq);
1447 
1448 		if (thread != NULL) {
1449 			z_thread_return_value_set_with_data(thread,
1450 							    swap_retval,
1451 							    swap_data);
1452 			unpend_thread_no_timeout(thread);
1453 			z_abort_thread_timeout(thread);
1454 			ready_thread(thread);
1455 			ret = true;
1456 		}
1457 	}
1458 
1459 	return ret;
1460 }
1461 
1462 int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1463 		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1464 {
1465 	int ret = z_pend_curr(lock, key, wait_q, timeout);
1466 
1467 	if (data != NULL) {
1468 		*data = _current->base.swap_data;
1469 	}
1470 	return ret;
1471 }
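/* Sketch of how a simple synchronization object might build on the
 * z_sched_wait()/z_sched_wake() helpers above (illustrative only; the
 * my_signal type and its fields are hypothetical).  The waiter receives the
 * waker's swap_retval as its return value and swap_data through *data.
 *
 *   struct my_signal {
 *           struct k_spinlock lock;
 *           _wait_q_t wait_q;
 *   };
 *
 *   int my_signal_wait(struct my_signal *sig, void **msg, k_timeout_t timeout)
 *   {
 *           k_spinlock_key_t key = k_spin_lock(&sig->lock);
 *
 *           return z_sched_wait(&sig->lock, key, &sig->wait_q, timeout, msg);
 *   }
 *
 *   void my_signal_send(struct my_signal *sig, void *msg)
 *   {
 *           (void)z_sched_wake(&sig->wait_q, 0, msg);
 *   }
 */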
1472 
1473 int z_sched_waitq_walk(_wait_q_t  *wait_q,
1474 		       int (*func)(struct k_thread *, void *), void *data)
1475 {
1476 	struct k_thread *thread;
1477 	int  status = 0;
1478 
1479 	K_SPINLOCK(&_sched_spinlock) {
1480 		_WAIT_Q_FOR_EACH(wait_q, thread) {
1481 
1482 			/*
1483 			 * Invoke the callback function on each waiting thread
1484 			 * for as long as there are both waiting threads AND
1485 			 * it returns 0.
1486 			 */
1487 
1488 			status = func(thread, data);
1489 			if (status != 0) {
1490 				break;
1491 			}
1492 		}
1493 	}
1494 
1495 	return status;
1496 }
1497 
1498 /* This routine exists for benchmarking purposes. It is not used in
1499  * general production code.
1500  */
1501 void z_unready_thread(struct k_thread *thread)
1502 {
1503 	K_SPINLOCK(&_sched_spinlock) {
1504 		unready_thread(thread);
1505 	}
1506 }
1507