1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <wait_q.h>
10 #include <kthread.h>
11 #include <priority_q.h>
12 #include <kswap.h>
13 #include <ipi.h>
14 #include <kernel_arch_func.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/drivers/timer/system_timer.h>
17 #include <stdbool.h>
18 #include <kernel_internal.h>
19 #include <zephyr/logging/log.h>
20 #include <zephyr/sys/atomic.h>
21 #include <zephyr/sys/math_extras.h>
22 #include <zephyr/timing/timing.h>
23 #include <zephyr/sys/util.h>
24 
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26 
27 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
28 extern struct k_thread *pending_current;
29 #endif
30 
31 struct k_spinlock _sched_spinlock;
32 
33 /* Storage to "complete" the context switch from an invalid/incomplete thread
34  * context (ex: exiting an ISR that aborted arch_current_thread())
35  */
36 __incoherent struct k_thread _thread_dummy;
37 
38 static ALWAYS_INLINE void update_cache(int preempt_ok);
39 static void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
41 
42 
43 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
44 	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
45 	     "CONFIG_NUM_METAIRQ_PRIORITIES, because Meta IRQs are just a special class of cooperative "
46 	     "threads.");
47 
48 /*
49  * Return value same as e.g. memcmp
50  * > 0 -> thread 1 priority  > thread 2 priority
51  * = 0 -> thread 1 priority == thread 2 priority
52  * < 0 -> thread 1 priority  < thread 2 priority
53  * Do not rely on the actual value returned aside from the above.
54  * (Again, like memcmp.)
55  */
56 int32_t z_sched_prio_cmp(struct k_thread *thread_1,
57 	struct k_thread *thread_2)
58 {
59 	/* `prio` is <32b, so the below cannot overflow. */
60 	int32_t b1 = thread_1->base.prio;
61 	int32_t b2 = thread_2->base.prio;
62 
63 	if (b1 != b2) {
64 		return b2 - b1;
65 	}
66 
67 #ifdef CONFIG_SCHED_DEADLINE
68 	/* If we assume all deadlines live within the same "half" of
69 	 * the 32 bit modulus space (this is a documented API rule),
70 	 * then the latest deadline in the queue minus the earliest is
71 	 * guaranteed to be (2's complement) non-negative.  We can
72 	 * leverage that to compare the values without having to check
73 	 * the current time.
74 	 */
75 	uint32_t d1 = thread_1->base.prio_deadline;
76 	uint32_t d2 = thread_2->base.prio_deadline;
77 
78 	if (d1 != d2) {
79 		/* Sooner deadline means higher effective priority.
80 		 * Doing the calculation with unsigned types and casting
81 		 * to signed isn't perfect, but at least reduces this
82 		 * from UB on overflow to impdef.
83 		 */
84 		return (int32_t) (d2 - d1);
85 	}
86 #endif /* CONFIG_SCHED_DEADLINE */
87 	return 0;
88 }
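/* Illustrative sketch (not part of the kernel): a hypothetical caller
 * would treat the comparator like memcmp and look only at the sign of
 * the result, e.g. to pick the thread with the higher effective
 * priority:
 *
 *	struct k_thread *winner =
 *		(z_sched_prio_cmp(a, b) >= 0) ? a : b;
 */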
89 
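/* Return the run queue a thread belongs to: the pinned CPU's per-CPU
 * queue when CONFIG_SCHED_CPU_MASK_PIN_ONLY is enabled, otherwise the
 * single global run queue.
 */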
90 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
91 {
92 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
93 	int cpu, m = thread->base.cpu_mask;
94 
95 	/* Edge case: it's legal per the API to "make runnable" a
96 	 * thread with all CPUs masked off (i.e. one that isn't
97 	 * actually runnable!).  Sort of a wart in the API and maybe
98 	 * we should address this in docs/assertions instead to avoid
99 	 * the extra test.
100 	 */
101 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
102 
103 	return &_kernel.cpus[cpu].ready_q.runq;
104 #else
105 	ARG_UNUSED(thread);
106 	return &_kernel.ready_q.runq;
107 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
108 }
109 
110 static ALWAYS_INLINE void *curr_cpu_runq(void)
111 {
112 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
113 	return &arch_curr_cpu()->ready_q.runq;
114 #else
115 	return &_kernel.ready_q.runq;
116 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
117 }
118 
119 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
120 {
121 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
122 
123 	_priq_run_add(thread_runq(thread), thread);
124 }
125 
126 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
127 {
128 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
129 
130 	_priq_run_remove(thread_runq(thread), thread);
131 }
132 
133 static ALWAYS_INLINE struct k_thread *runq_best(void)
134 {
135 	return _priq_run_best(curr_cpu_runq());
136 }
137 
138 /* arch_current_thread() is never in the run queue until context switch on
139  * SMP configurations, see z_requeue_current()
140  */
141 static inline bool should_queue_thread(struct k_thread *thread)
142 {
143 	return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread());
144 }
145 
146 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
147 {
148 	thread->base.thread_state |= _THREAD_QUEUED;
149 	if (should_queue_thread(thread)) {
150 		runq_add(thread);
151 	}
152 #ifdef CONFIG_SMP
153 	if (thread == arch_current_thread()) {
154 		/* add current to end of queue means "yield" */
155 		_current_cpu->swap_ok = true;
156 	}
157 #endif /* CONFIG_SMP */
158 }
159 
160 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
161 {
162 	thread->base.thread_state &= ~_THREAD_QUEUED;
163 	if (should_queue_thread(thread)) {
164 		runq_remove(thread);
165 	}
166 }
167 
168 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
169  * never live in the run queue until we are inexorably on the context
170  * switch path on SMP, otherwise there is a deadlock condition where a
171  * set of CPUs pick a cycle of threads to run and wait for them all to
172  * context switch forever.
173  */
174 void z_requeue_current(struct k_thread *thread)
175 {
176 	if (z_is_thread_queued(thread)) {
177 		runq_add(thread);
178 	}
179 	signal_pending_ipi();
180 }
181 
182 /* Return true if the thread is aborting, else false */
183 static inline bool is_aborting(struct k_thread *thread)
184 {
185 	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
186 }
187 
188 /* Return true if the thread is aborting or suspending, else false */
189 static inline bool is_halting(struct k_thread *thread)
190 {
191 	return (thread->base.thread_state &
192 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
193 }
194 
195 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
196 static inline void clear_halting(struct k_thread *thread)
197 {
198 	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
199 	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
200 }
201 
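/* Pick the thread that should run next on this CPU: normally the best
 * thread in the run queue, but a cooperative thread preempted by a
 * metairq is restored in preference to a non-metairq choice, and on
 * SMP the result is additionally weighed against arch_current_thread(),
 * which never lives in the queue (see the comments below).
 */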
202 static ALWAYS_INLINE struct k_thread *next_up(void)
203 {
204 #ifdef CONFIG_SMP
205 	if (is_halting(arch_current_thread())) {
206 		halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ?
207 				      _THREAD_DEAD : _THREAD_SUSPENDED);
208 	}
209 #endif /* CONFIG_SMP */
210 
211 	struct k_thread *thread = runq_best();
212 
213 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
214 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
215 	/* MetaIRQs must always attempt to return back to a
216 	 * cooperative thread they preempted and not whatever happens
217 	 * to be highest priority now. The cooperative thread was
218 	 * promised it wouldn't be preempted (by non-metairq threads)!
219 	 */
220 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
221 
222 	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
223 		if (!z_is_thread_prevented_from_running(mirqp)) {
224 			thread = mirqp;
225 		} else {
226 			_current_cpu->metairq_preempted = NULL;
227 		}
228 	}
229 #endif
230 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
231  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
232  */
233 
234 #ifndef CONFIG_SMP
235 	/* In uniprocessor mode, we can leave the current thread in
236 	 * the queue (actually we have to, otherwise the assembly
237 	 * context switch code for all architectures would be
238 	 * responsible for putting it back in z_swap and ISR return!),
239 	 * which makes this choice simple.
240 	 */
241 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
242 #else
243 	/* Under SMP, the "cache" mechanism for selecting the next
244 	 * thread doesn't work, so we have more work to do to test
245 	 * arch_current_thread() against the best choice from the queue.  Here, the
246 	 * thread selected above represents "the best thread that is
247 	 * not current".
248 	 *
249 	 * Subtle note on "queued": in SMP mode, arch_current_thread() does not
250 	 * live in the queue, so this isn't exactly the same thing as
251 	 * "ready", it means "is arch_current_thread() already added back to the
252 	 * queue such that we don't want to re-add it".
253 	 */
254 	bool queued = z_is_thread_queued(arch_current_thread());
255 	bool active = !z_is_thread_prevented_from_running(arch_current_thread());
256 
257 	if (thread == NULL) {
258 		thread = _current_cpu->idle_thread;
259 	}
260 
261 	if (active) {
262 		int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread);
263 
264 		/* Ties only switch if state says we yielded */
265 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
266 			thread = arch_current_thread();
267 		}
268 
269 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
270 			thread = arch_current_thread();
271 		}
272 	}
273 
274 	/* Put arch_current_thread() back into the queue */
275 	if ((thread != arch_current_thread()) && active &&
276 		!z_is_idle_thread_object(arch_current_thread()) && !queued) {
277 		queue_thread(arch_current_thread());
278 	}
279 
280 	/* Take the new arch_current_thread() out of the queue */
281 	if (z_is_thread_queued(thread)) {
282 		dequeue_thread(thread);
283 	}
284 
285 	_current_cpu->swap_ok = false;
286 	return thread;
287 #endif /* CONFIG_SMP */
288 }
289 
290 void move_thread_to_end_of_prio_q(struct k_thread *thread)
291 {
292 	if (z_is_thread_queued(thread)) {
293 		dequeue_thread(thread);
294 	}
295 	queue_thread(thread);
296 	update_cache(thread == arch_current_thread());
297 }
298 
299 /* Track cooperative threads preempted by metairqs so we can return to
300  * them specifically.  Called at the moment a new thread has been
301  * selected to run.
302  */
303 static void update_metairq_preempt(struct k_thread *thread)
304 {
305 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
306 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
307 	if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) &&
308 	    !thread_is_preemptible(arch_current_thread())) {
309 		/* Record new preemption */
310 		_current_cpu->metairq_preempted = arch_current_thread();
311 	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
312 		/* Returning from existing preemption */
313 		_current_cpu->metairq_preempted = NULL;
314 	}
315 #else
316 	ARG_UNUSED(thread);
317 #endif
318 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
319  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
320  */
321 }
322 
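/* Re-evaluate the scheduling decision after something changed.  On
 * uniprocessor builds this refreshes the cached "next thread" pointer
 * (_kernel.ready_q.cache); on SMP it only records whether cooperative
 * preemption of the current thread is acceptable right now.
 */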
323 static ALWAYS_INLINE void update_cache(int preempt_ok)
324 {
325 #ifndef CONFIG_SMP
326 	struct k_thread *thread = next_up();
327 
328 	if (should_preempt(thread, preempt_ok)) {
329 #ifdef CONFIG_TIMESLICING
330 		if (thread != arch_current_thread()) {
331 			z_reset_time_slice(thread);
332 		}
333 #endif /* CONFIG_TIMESLICING */
334 		update_metairq_preempt(thread);
335 		_kernel.ready_q.cache = thread;
336 	} else {
337 		_kernel.ready_q.cache = arch_current_thread();
338 	}
339 
340 #else
341 	/* The way this works is that the CPU record keeps its
342 	 * "cooperative swapping is OK" flag until the next reschedule
343 	 * call or context switch.  It doesn't need to be tracked per
344 	 * thread because if the thread gets preempted for whatever
345 	 * reason the scheduler will make the same decision anyway.
346 	 */
347 	_current_cpu->swap_ok = preempt_ok;
348 #endif /* CONFIG_SMP */
349 }
350 
351 static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
352 {
353 	/* Returns pointer to _cpu if the thread is currently running on
354 	 * another CPU. There are more scalable designs to answer this
355 	 * question in constant time, but this is fine for now.
356 	 */
357 #ifdef CONFIG_SMP
358 	int currcpu = _current_cpu->id;
359 
360 	unsigned int num_cpus = arch_num_cpus();
361 
362 	for (int i = 0; i < num_cpus; i++) {
363 		if ((i != currcpu) &&
364 		    (_kernel.cpus[i].current == thread)) {
365 			return &_kernel.cpus[i];
366 		}
367 	}
368 #endif /* CONFIG_SMP */
369 	ARG_UNUSED(thread);
370 	return NULL;
371 }
372 
373 static void ready_thread(struct k_thread *thread)
374 {
375 #ifdef CONFIG_KERNEL_COHERENCE
376 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
377 #endif /* CONFIG_KERNEL_COHERENCE */
378 
379 	/* If the thread is already queued, do not try to add it to the
380 	 * run queue again.
381 	 */
382 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
383 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
384 
385 		queue_thread(thread);
386 		update_cache(0);
387 
388 		flag_ipi(ipi_mask_create(thread));
389 	}
390 }
391 
392 void z_ready_thread(struct k_thread *thread)
393 {
394 	K_SPINLOCK(&_sched_spinlock) {
395 		if (thread_active_elsewhere(thread) == NULL) {
396 			ready_thread(thread);
397 		}
398 	}
399 }
400 
401 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
402 {
403 	K_SPINLOCK(&_sched_spinlock) {
404 		move_thread_to_end_of_prio_q(thread);
405 	}
406 }
407 
408 /* Spins in ISR context, waiting for a thread known to be running on
409  * another CPU to catch the IPI we sent and halt.  Note that we check
410  * for ourselves being asynchronously halted first to prevent simple
411  * deadlocks (but not complex ones involving cycles of 3+ threads!).
412  * Acts to release the provided lock before returning.
413  */
414 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
415 {
416 	if (is_halting(arch_current_thread())) {
417 		halt_thread(arch_current_thread(),
418 			    is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED);
419 	}
420 	k_spin_unlock(&_sched_spinlock, key);
421 	while (is_halting(thread)) {
422 		unsigned int k = arch_irq_lock();
423 
424 		arch_spin_relax(); /* Requires interrupts be masked */
425 		arch_irq_unlock(k);
426 	}
427 }
428 
429 /* Shared handler for k_thread_{suspend,abort}().  Called with the
430  * scheduler lock held and the key passed (which it may
431  * release/reacquire!).  The lock will have been released before any
432  * return (aborting arch_current_thread() will not return, obviously),
433  * which may happen after a context switch.
434  */
435 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
436 			  bool terminate)
437 {
438 	_wait_q_t *wq = &thread->join_queue;
439 #ifdef CONFIG_SMP
440 	wq = terminate ? wq : &thread->halt_queue;
441 #endif
442 
443 	/* If the target is a thread running on another CPU, flag and
444 	 * poke (note that we might spin to wait, so a true
445 	 * synchronous IPI is needed here, not deferred!), it will
446 	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
447 	 * can clean it up directly.
448 	 */
449 
450 	struct _cpu *cpu = thread_active_elsewhere(thread);
451 
452 	if (cpu != NULL) {
453 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
454 					      : _THREAD_SUSPENDING);
455 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
456 #ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
457 		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
458 #else
459 		arch_sched_broadcast_ipi();
460 #endif
461 #endif
462 		if (arch_is_in_isr()) {
463 			thread_halt_spin(thread, key);
464 		} else  {
465 			add_to_waitq_locked(arch_current_thread(), wq);
466 			z_swap(&_sched_spinlock, key);
467 		}
468 	} else {
469 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
470 		if ((thread == arch_current_thread()) && !arch_is_in_isr()) {
471 			z_swap(&_sched_spinlock, key);
472 			__ASSERT(!terminate, "aborted arch_current_thread() back from dead");
473 		} else {
474 			k_spin_unlock(&_sched_spinlock, key);
475 		}
476 	}
477 	/* NOTE: the scheduler lock has been released.  Don't put
478 	 * logic here, it's likely to be racy/deadlocky even if you
479 	 * re-take the lock!
480 	 */
481 }
482 
483 
484 void z_impl_k_thread_suspend(k_tid_t thread)
485 {
486 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
487 
488 	/* Special case "suspend the current thread" as it doesn't
489 	 * need the async complexity below.
490 	 */
491 	if (thread == arch_current_thread() && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) {
492 		k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
493 
494 		z_mark_thread_as_suspended(thread);
495 		dequeue_thread(thread);
496 		update_cache(1);
497 		z_swap(&_sched_spinlock, key);
498 		return;
499 	}
500 
501 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
502 
503 	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {
504 
505 		/* The target thread is already suspended. Nothing to do. */
506 
507 		k_spin_unlock(&_sched_spinlock, key);
508 		return;
509 	}
510 
511 	z_thread_halt(thread, key, false);
512 
513 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
514 }
515 
516 #ifdef CONFIG_USERSPACE
517 static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
518 {
519 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
520 	z_impl_k_thread_suspend(thread);
521 }
522 #include <zephyr/syscalls/k_thread_suspend_mrsh.c>
523 #endif /* CONFIG_USERSPACE */
524 
525 void z_impl_k_thread_resume(k_tid_t thread)
526 {
527 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
528 
529 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
530 
531 	/* Do not try to resume a thread that was not suspended */
532 	if (!z_is_thread_suspended(thread)) {
533 		k_spin_unlock(&_sched_spinlock, key);
534 		return;
535 	}
536 
537 	z_mark_thread_as_not_suspended(thread);
538 	ready_thread(thread);
539 
540 	z_reschedule(&_sched_spinlock, key);
541 
542 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
543 }
544 
545 #ifdef CONFIG_USERSPACE
546 static inline void z_vrfy_k_thread_resume(k_tid_t thread)
547 {
548 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
549 	z_impl_k_thread_resume(thread);
550 }
551 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
552 #endif /* CONFIG_USERSPACE */
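/* Illustrative usage sketch (not kernel code): from application code
 * the syscall wrappers generated for the z_impl_*() handlers above are
 * called as k_thread_suspend()/k_thread_resume(), e.g.:
 *
 *	k_thread_suspend(&worker_thread);  // worker stops being scheduled
 *	...
 *	k_thread_resume(&worker_thread);   // worker becomes ready again
 *
 * "worker_thread" is a hypothetical application thread object.
 */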
553 
554 static void unready_thread(struct k_thread *thread)
555 {
556 	if (z_is_thread_queued(thread)) {
557 		dequeue_thread(thread);
558 	}
559 	update_cache(thread == arch_current_thread());
560 }
561 
562 /* _sched_spinlock must be held */
563 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
564 {
565 	unready_thread(thread);
566 	z_mark_thread_as_pending(thread);
567 
568 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
569 
570 	if (wait_q != NULL) {
571 		thread->base.pended_on = wait_q;
572 		_priq_wait_add(&wait_q->waitq, thread);
573 	}
574 }
575 
576 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
577 {
578 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
579 		z_add_thread_timeout(thread, timeout);
580 	}
581 }
582 
583 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
584 			k_timeout_t timeout)
585 {
586 #ifdef CONFIG_KERNEL_COHERENCE
587 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
588 #endif /* CONFIG_KERNEL_COHERENCE */
589 	add_to_waitq_locked(thread, wait_q);
590 	add_thread_timeout(thread, timeout);
591 }
592 
593 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
594 		   k_timeout_t timeout)
595 {
596 	__ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread));
597 	K_SPINLOCK(&_sched_spinlock) {
598 		pend_locked(thread, wait_q, timeout);
599 	}
600 }
601 
602 void z_unpend_thread_no_timeout(struct k_thread *thread)
603 {
604 	K_SPINLOCK(&_sched_spinlock) {
605 		if (thread->base.pended_on != NULL) {
606 			unpend_thread_no_timeout(thread);
607 		}
608 	}
609 }
610 
611 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
612 {
613 	K_SPINLOCK(&_sched_spinlock) {
614 		bool killed = (thread->base.thread_state &
615 				(_THREAD_DEAD | _THREAD_ABORTING));
616 
617 #ifdef CONFIG_EVENTS
618 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
619 
620 		thread->no_wake_on_timeout = false;
621 
622 		if (do_nothing) {
623 			continue;
624 		}
625 #endif /* CONFIG_EVENTS */
626 
627 		if (!killed) {
628 			/* The thread is not being killed */
629 			if (thread->base.pended_on != NULL) {
630 				unpend_thread_no_timeout(thread);
631 			}
632 			z_mark_thread_as_not_sleeping(thread);
633 			ready_thread(thread);
634 		}
635 	}
636 
637 }
638 
639 #ifdef CONFIG_SYS_CLOCK_EXISTS
640 /* Timeout handler for *_thread_timeout() APIs */
641 void z_thread_timeout(struct _timeout *timeout)
642 {
643 	struct k_thread *thread = CONTAINER_OF(timeout,
644 					       struct k_thread, base.timeout);
645 
646 	z_sched_wake_thread(thread, true);
647 }
648 #endif /* CONFIG_SYS_CLOCK_EXISTS */
649 
650 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
651 	       _wait_q_t *wait_q, k_timeout_t timeout)
652 {
653 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
654 	pending_current = arch_current_thread();
655 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
656 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
657 
658 	/* We do a "lock swap" prior to calling z_swap(), such that
659 	 * the caller's lock gets released as desired.  But we ensure
660 	 * that we hold the scheduler lock and leave local interrupts
661 	 * masked until we reach the context switch.  z_swap() itself
662 	 * has similar code; the duplication is because it's a legacy
663 	 * API that doesn't expect to be called with scheduler lock
664 	 * held.
665 	 */
666 	(void) k_spin_lock(&_sched_spinlock);
667 	pend_locked(arch_current_thread(), wait_q, timeout);
668 	k_spin_release(lock);
669 	return z_swap(&_sched_spinlock, key);
670 }
671 
672 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
673 {
674 	struct k_thread *thread = NULL;
675 
676 	K_SPINLOCK(&_sched_spinlock) {
677 		thread = _priq_wait_best(&wait_q->waitq);
678 
679 		if (thread != NULL) {
680 			unpend_thread_no_timeout(thread);
681 		}
682 	}
683 
684 	return thread;
685 }
686 
687 void z_unpend_thread(struct k_thread *thread)
688 {
689 	z_unpend_thread_no_timeout(thread);
690 	(void)z_abort_thread_timeout(thread);
691 }
692 
693 /* Priority set utility that does no rescheduling, it just changes the
694  * run queue state, returning true if a reschedule is needed later.
695  */
696 bool z_thread_prio_set(struct k_thread *thread, int prio)
697 {
698 	bool need_sched = false;
699 	int old_prio = thread->base.prio;
700 
701 	K_SPINLOCK(&_sched_spinlock) {
702 		need_sched = z_is_thread_ready(thread);
703 
704 		if (need_sched) {
705 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
706 				dequeue_thread(thread);
707 				thread->base.prio = prio;
708 				queue_thread(thread);
709 
710 				if (old_prio > prio) {
711 					flag_ipi(ipi_mask_create(thread));
712 				}
713 			} else {
714 				/*
715 				 * This is a running thread on SMP. Update its
716 				 * priority, but do not requeue it. An IPI is
717 				 * needed if the priority is both being lowered
718 				 * and it is running on another CPU.
719 				 */
720 
721 				thread->base.prio = prio;
722 
723 				struct _cpu *cpu;
724 
725 				cpu = thread_active_elsewhere(thread);
726 				if ((cpu != NULL) && (old_prio < prio)) {
727 					flag_ipi(IPI_CPU_MASK(cpu->id));
728 				}
729 			}
730 
731 			update_cache(1);
732 		} else {
733 			thread->base.prio = prio;
734 		}
735 	}
736 
737 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
738 
739 	return need_sched;
740 }
741 
742 static inline bool resched(uint32_t key)
743 {
744 #ifdef CONFIG_SMP
745 	_current_cpu->swap_ok = 0;
746 #endif /* CONFIG_SMP */
747 
748 	return arch_irq_unlocked(key) && !arch_is_in_isr();
749 }
750 
751 /*
752  * Check if the next ready thread is the same as the current thread
753  * and save the trip if true.
754  */
755 static inline bool need_swap(void)
756 {
757 	/* the SMP case will be handled in C based z_swap() */
758 #ifdef CONFIG_SMP
759 	return true;
760 #else
761 	struct k_thread *new_thread;
762 
763 	/* Check if the next ready thread is the same as the current thread */
764 	new_thread = _kernel.ready_q.cache;
765 	return new_thread != arch_current_thread();
766 #endif /* CONFIG_SMP */
767 }
768 
769 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
770 {
771 	if (resched(key.key) && need_swap()) {
772 		z_swap(lock, key);
773 	} else {
774 		k_spin_unlock(lock, key);
775 		signal_pending_ipi();
776 	}
777 }
778 
779 void z_reschedule_irqlock(uint32_t key)
780 {
781 	if (resched(key) && need_swap()) {
782 		z_swap_irqlock(key);
783 	} else {
784 		irq_unlock(key);
785 		signal_pending_ipi();
786 	}
787 }
788 
789 void k_sched_lock(void)
790 {
791 	K_SPINLOCK(&_sched_spinlock) {
792 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
793 
794 		z_sched_lock();
795 	}
796 }
797 
798 void k_sched_unlock(void)
799 {
800 	K_SPINLOCK(&_sched_spinlock) {
801 		__ASSERT(arch_current_thread()->base.sched_locked != 0U, "");
802 		__ASSERT(!arch_is_in_isr(), "");
803 
804 		++arch_current_thread()->base.sched_locked;
805 		update_cache(0);
806 	}
807 
808 	LOG_DBG("scheduler unlocked (%p:%d)",
809 		arch_current_thread(), arch_current_thread()->base.sched_locked);
810 
811 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
812 
813 	z_reschedule_unlocked();
814 }
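/* Illustrative usage sketch (not kernel code): k_sched_lock() and
 * k_sched_unlock() nest, and preemption of the locking thread stays
 * disabled until every lock is matched by an unlock, e.g.:
 *
 *	k_sched_lock();
 *	update_shared_state();   // hypothetical; no other thread runs here
 *	k_sched_unlock();        // may reschedule if a higher priority thread is ready
 */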
815 
816 struct k_thread *z_swap_next_thread(void)
817 {
818 #ifdef CONFIG_SMP
819 	struct k_thread *ret = next_up();
820 
821 	if (ret == arch_current_thread()) {
822 		/* When not swapping, have to signal IPIs here.  In
823 		 * the context switch case it must happen later, after
824 		 * arch_current_thread() gets requeued.
825 		 */
826 		signal_pending_ipi();
827 	}
828 	return ret;
829 #else
830 	return _kernel.ready_q.cache;
831 #endif /* CONFIG_SMP */
832 }
833 
834 #ifdef CONFIG_USE_SWITCH
835 /* Just a wrapper around arch_current_thread_set(xxx) with tracing */
836 static inline void set_current(struct k_thread *new_thread)
837 {
838 	z_thread_mark_switched_out();
839 	arch_current_thread_set(new_thread);
840 }
841 
842 /**
843  * @brief Determine next thread to execute upon completion of an interrupt
844  *
845  * Thread preemption is performed by context switching after the completion
846  * of a non-recursed interrupt. This function determines which thread to
847  * switch to if any. This function accepts as @p interrupted either:
848  *
849  * - The handle for the interrupted thread in which case the thread's context
850  *   must already be fully saved and ready to be picked up by a different CPU.
851  *
852  * - NULL if more work is required to fully save the thread's state after
853  *   it is known that a new thread is to be scheduled. It is up to the caller
854  *   to store the handle resulting from the thread that is being switched out
855  *   in that thread's "switch_handle" field after its
856  *   context has fully been saved, following the same requirements as with
857  *   the @ref arch_switch() function.
858  *
859  * If a new thread needs to be scheduled then its handle is returned.
860  * Otherwise the same value provided as @p interrupted is returned back.
861  * Those handles are the same opaque types used by the @ref arch_switch()
862  * function.
863  *
864  * @warning
865  * The arch_current_thread() value may have changed after this call and not refer
866  * to the interrupted thread anymore. It might be necessary to make a local
867  * copy before calling this function.
868  *
869  * @param interrupted Handle for the thread that was interrupted or NULL.
870  * @retval Handle for the next thread to execute, or @p interrupted when
871  *         no new thread is to be scheduled.
872  */
873 void *z_get_next_switch_handle(void *interrupted)
874 {
875 	z_check_stack_sentinel();
876 
877 #ifdef CONFIG_SMP
878 	void *ret = NULL;
879 
880 	K_SPINLOCK(&_sched_spinlock) {
881 		struct k_thread *old_thread = arch_current_thread(), *new_thread;
882 
883 		if (IS_ENABLED(CONFIG_SMP)) {
884 			old_thread->switch_handle = NULL;
885 		}
886 		new_thread = next_up();
887 
888 		z_sched_usage_switch(new_thread);
889 
890 		if (old_thread != new_thread) {
891 			uint8_t  cpu_id;
892 
893 			update_metairq_preempt(new_thread);
894 			z_sched_switch_spin(new_thread);
895 			arch_cohere_stacks(old_thread, interrupted, new_thread);
896 
897 			_current_cpu->swap_ok = 0;
898 			cpu_id = arch_curr_cpu()->id;
899 			new_thread->base.cpu = cpu_id;
900 			set_current(new_thread);
901 
902 #ifdef CONFIG_TIMESLICING
903 			z_reset_time_slice(new_thread);
904 #endif /* CONFIG_TIMESLICING */
905 
906 #ifdef CONFIG_SPIN_VALIDATE
907 			/* Changed arch_current_thread()!  Update the spinlock
908 			 * bookkeeping so the validation doesn't get
909 			 * confused when the "wrong" thread tries to
910 			 * release the lock.
911 			 */
912 			z_spin_lock_set_owner(&_sched_spinlock);
913 #endif /* CONFIG_SPIN_VALIDATE */
914 
915 			/* A queued (runnable) old/current thread
916 			 * needs to be added back to the run queue
917 			 * here, and atomically with its switch handle
918 			 * being set below.  This is safe now, as we
919 			 * will not return into it.
920 			 */
921 			if (z_is_thread_queued(old_thread)) {
922 #ifdef CONFIG_SCHED_IPI_CASCADE
923 				if ((new_thread->base.cpu_mask != -1) &&
924 				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
925 					flag_ipi(ipi_mask_create(old_thread));
926 				}
927 #endif
928 				runq_add(old_thread);
929 			}
930 		}
931 		old_thread->switch_handle = interrupted;
932 		ret = new_thread->switch_handle;
933 		if (IS_ENABLED(CONFIG_SMP)) {
934 			/* Active threads MUST have a null here */
935 			new_thread->switch_handle = NULL;
936 		}
937 	}
938 	signal_pending_ipi();
939 	return ret;
940 #else
941 	z_sched_usage_switch(_kernel.ready_q.cache);
942 	arch_current_thread()->switch_handle = interrupted;
943 	set_current(_kernel.ready_q.cache);
944 	return arch_current_thread()->switch_handle;
945 #endif /* CONFIG_SMP */
946 }
947 #endif /* CONFIG_USE_SWITCH */
948 
949 int z_unpend_all(_wait_q_t *wait_q)
950 {
951 	int need_sched = 0;
952 	struct k_thread *thread;
953 
954 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
955 		z_unpend_thread(thread);
956 		z_ready_thread(thread);
957 		need_sched = 1;
958 	}
959 
960 	return need_sched;
961 }
962 
963 void init_ready_q(struct _ready_q *ready_q)
964 {
965 	_priq_run_init(&ready_q->runq);
966 }
967 
968 void z_sched_init(void)
969 {
970 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
971 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
972 		init_ready_q(&_kernel.cpus[i].ready_q);
973 	}
974 #else
975 	init_ready_q(&_kernel.ready_q);
976 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
977 }
978 
979 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
980 {
981 	/*
982 	 * Use NULL, since we cannot know what the entry point is (we do not
983 	 * keep track of it) and idle cannot change its priority.
984 	 */
985 	Z_ASSERT_VALID_PRIO(prio, NULL);
986 
987 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
988 
989 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
990 			     (arch_current_thread()->base.sched_locked == 0U))) {
991 		z_reschedule_unlocked();
992 	}
993 }
994 
995 #ifdef CONFIG_USERSPACE
996 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
997 {
998 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
999 	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
1000 				    "invalid thread priority %d", prio));
1001 #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
1002 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
1003 				    "thread priority may only be downgraded (%d < %d)",
1004 				    prio, thread->base.prio));
1005 #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
1006 	z_impl_k_thread_priority_set(thread, prio);
1007 }
1008 #include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
1009 #endif /* CONFIG_USERSPACE */
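/* Illustrative usage sketch (not kernel code): the public wrapper
 * generated for z_impl_k_thread_priority_set() is used as e.g.
 *
 *	k_thread_priority_set(tid, K_PRIO_PREEMPT(7));
 *
 * where "tid" is a hypothetical thread id; lower numeric values mean
 * higher priority, and the call may reschedule immediately.
 */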
1010 
1011 #ifdef CONFIG_SCHED_DEADLINE
1012 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1013 {
1014 
1015 	deadline = CLAMP(deadline, 0, INT_MAX);
1016 
1017 	struct k_thread *thread = tid;
1018 	int32_t newdl = k_cycle_get_32() + deadline;
1019 
1020 	/* The prio_deadline field changes the sorting order, so can't
1021 	 * change it while the thread is in the run queue (dlists
1022 	 * actually are benign as long as we requeue it before we
1023 	 * release the lock, but an rbtree will blow up if we break
1024 	 * sorting!)
1025 	 */
1026 	K_SPINLOCK(&_sched_spinlock) {
1027 		if (z_is_thread_queued(thread)) {
1028 			dequeue_thread(thread);
1029 			thread->base.prio_deadline = newdl;
1030 			queue_thread(thread);
1031 		} else {
1032 			thread->base.prio_deadline = newdl;
1033 		}
1034 	}
1035 }
1036 
1037 #ifdef CONFIG_USERSPACE
1038 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1039 {
1040 	struct k_thread *thread = tid;
1041 
1042 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1043 	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1044 				    "invalid thread deadline %d",
1045 				    (int)deadline));
1046 
1047 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1048 }
1049 #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
1050 #endif /* CONFIG_USERSPACE */
1051 #endif /* CONFIG_SCHED_DEADLINE */
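/* Illustrative usage sketch (not kernel code): with
 * CONFIG_SCHED_DEADLINE a thread typically refreshes its own relative
 * deadline at the start of each work cycle, e.g.
 *
 *	k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 *
 * The deadline is expressed in k_cycle_get_32() units and only breaks
 * ties between threads of equal static priority (see z_sched_prio_cmp()).
 */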
1052 
1053 void z_impl_k_reschedule(void)
1054 {
1055 	k_spinlock_key_t key;
1056 
1057 	key = k_spin_lock(&_sched_spinlock);
1058 
1059 	update_cache(0);
1060 
1061 	z_reschedule(&_sched_spinlock, key);
1062 }
1063 
1064 #ifdef CONFIG_USERSPACE
1065 static inline void z_vrfy_k_reschedule(void)
1066 {
1067 	z_impl_k_reschedule();
1068 }
1069 #endif /* CONFIG_USERSPACE */
1070 
1071 bool k_can_yield(void)
1072 {
1073 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1074 		 z_is_idle_thread_object(arch_current_thread()));
1075 }
1076 
1077 void z_impl_k_yield(void)
1078 {
1079 	__ASSERT(!arch_is_in_isr(), "");
1080 
1081 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1082 
1083 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1084 
1085 	if (!IS_ENABLED(CONFIG_SMP) ||
1086 	    z_is_thread_queued(arch_current_thread())) {
1087 		dequeue_thread(arch_current_thread());
1088 	}
1089 	queue_thread(arch_current_thread());
1090 	update_cache(1);
1091 	z_swap(&_sched_spinlock, key);
1092 }
1093 
1094 #ifdef CONFIG_USERSPACE
1095 static inline void z_vrfy_k_yield(void)
1096 {
1097 	z_impl_k_yield();
1098 }
1099 #include <zephyr/syscalls/k_yield_mrsh.c>
1100 #endif /* CONFIG_USERSPACE */
1101 
1102 static int32_t z_tick_sleep(k_ticks_t ticks)
1103 {
1104 	uint32_t expected_wakeup_ticks;
1105 
1106 	__ASSERT(!arch_is_in_isr(), "");
1107 
1108 	LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks);
1109 
1110 	/* A wait of 0 ticks is treated as a 'yield' */
1111 	if (ticks == 0) {
1112 		k_yield();
1113 		return 0;
1114 	}
1115 
1116 	if (Z_TICK_ABS(ticks) <= 0) {
1117 		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1118 	} else {
1119 		expected_wakeup_ticks = Z_TICK_ABS(ticks);
1120 	}
1121 
1122 	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
1123 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1124 
1125 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1126 	pending_current = arch_current_thread();
1127 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
1128 	unready_thread(arch_current_thread());
1129 	z_add_thread_timeout(arch_current_thread(), timeout);
1130 	z_mark_thread_as_sleeping(arch_current_thread());
1131 
1132 	(void)z_swap(&_sched_spinlock, key);
1133 
1134 	/* Use a 32 bit unsigned subtraction so tick counter wraparound is handled correctly */
1135 	uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32();
1136 
1137 	/* Cast to signed 32 bit so an "overslept" (negative) remainder is detected and clamped to 0 below */
1138 	ticks = (k_ticks_t)(int32_t)left_ticks;
1139 	if (ticks > 0) {
1140 		return ticks;
1141 	}
1142 
1143 	return 0;
1144 }
1145 
1146 int32_t z_impl_k_sleep(k_timeout_t timeout)
1147 {
1148 	k_ticks_t ticks;
1149 
1150 	__ASSERT(!arch_is_in_isr(), "");
1151 
1152 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1153 
1154 	ticks = timeout.ticks;
1155 
1156 	ticks = z_tick_sleep(ticks);
1157 
1158 	int32_t ret = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
1159 		      k_ticks_to_ms_ceil64(ticks);
1160 
1161 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1162 
1163 	return ret;
1164 }
1165 
1166 #ifdef CONFIG_USERSPACE
1167 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1168 {
1169 	return z_impl_k_sleep(timeout);
1170 }
1171 #include <zephyr/syscalls/k_sleep_mrsh.c>
1172 #endif /* CONFIG_USERSPACE */
1173 
1174 int32_t z_impl_k_usleep(int32_t us)
1175 {
1176 	int32_t ticks;
1177 
1178 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1179 
1180 	ticks = k_us_to_ticks_ceil64(us);
1181 	ticks = z_tick_sleep(ticks);
1182 
1183 	int32_t ret = k_ticks_to_us_ceil64(ticks);
1184 
1185 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1186 
1187 	return ret;
1188 }
1189 
1190 #ifdef CONFIG_USERSPACE
1191 static inline int32_t z_vrfy_k_usleep(int32_t us)
1192 {
1193 	return z_impl_k_usleep(us);
1194 }
1195 #include <zephyr/syscalls/k_usleep_mrsh.c>
1196 #endif /* CONFIG_USERSPACE */
1197 
1198 void z_impl_k_wakeup(k_tid_t thread)
1199 {
1200 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1201 
1202 	(void)z_abort_thread_timeout(thread);
1203 
1204 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
1205 
1206 	if (!z_is_thread_sleeping(thread)) {
1207 		k_spin_unlock(&_sched_spinlock, key);
1208 		return;
1209 	}
1210 
1211 	z_mark_thread_as_not_sleeping(thread);
1212 
1213 	ready_thread(thread);
1214 
1215 	if (arch_is_in_isr()) {
1216 		k_spin_unlock(&_sched_spinlock, key);
1217 	} else {
1218 		z_reschedule(&_sched_spinlock, key);
1219 	}
1220 }
1221 
1222 #ifdef CONFIG_USERSPACE
1223 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1224 {
1225 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1226 	z_impl_k_wakeup(thread);
1227 }
1228 #include <zephyr/syscalls/k_wakeup_mrsh.c>
1229 #endif /* CONFIG_USERSPACE */
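/* Illustrative usage sketch (not kernel code): k_wakeup() cuts an
 * ongoing k_sleep() short, and the sleeper sees the remaining time:
 *
 *	// sleeper (hypothetical application thread)
 *	int32_t left_ms = k_sleep(K_MSEC(100));
 *
 *	// elsewhere: wake it early
 *	k_wakeup(sleeper_tid);
 *
 * left_ms is 0 if the full timeout elapsed, positive if woken early.
 */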
1230 
1231 k_tid_t z_impl_k_sched_current_thread_query(void)
1232 {
1233 	return arch_current_thread();
1234 }
1235 
1236 #ifdef CONFIG_USERSPACE
1237 static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1238 {
1239 	return z_impl_k_sched_current_thread_query();
1240 }
1241 #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
1242 #endif /* CONFIG_USERSPACE */
1243 
1244 static inline void unpend_all(_wait_q_t *wait_q)
1245 {
1246 	struct k_thread *thread;
1247 
1248 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
1249 		unpend_thread_no_timeout(thread);
1250 		(void)z_abort_thread_timeout(thread);
1251 		arch_thread_return_value_set(thread, 0);
1252 		ready_thread(thread);
1253 	}
1254 }
1255 
1256 #ifdef CONFIG_THREAD_ABORT_HOOK
1257 extern void thread_abort_hook(struct k_thread *thread);
1258 #endif /* CONFIG_THREAD_ABORT_HOOK */
1259 
1260 /**
1261  * @brief Dequeues the specified thread
1262  *
1263  * Dequeues the specified thread and moves it into the specified new state.
1264  *
1265  * @param thread Identify the thread to halt
1266  * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1267  */
1268 static void halt_thread(struct k_thread *thread, uint8_t new_state)
1269 {
1270 	bool dummify = false;
1271 
1272 	/* We hold the lock, and the thread is known not to be running
1273 	 * anywhere.
1274 	 */
1275 	if ((thread->base.thread_state & new_state) == 0U) {
1276 		thread->base.thread_state |= new_state;
1277 		if (z_is_thread_queued(thread)) {
1278 			dequeue_thread(thread);
1279 		}
1280 
1281 		if (new_state == _THREAD_DEAD) {
1282 			if (thread->base.pended_on != NULL) {
1283 				unpend_thread_no_timeout(thread);
1284 			}
1285 			(void)z_abort_thread_timeout(thread);
1286 			unpend_all(&thread->join_queue);
1287 
1288 			/* Edge case: aborting arch_current_thread() from within an
1289 			 * ISR that preempted it requires clearing the
1290 			 * arch_current_thread() pointer so the upcoming context
1291 			 * switch doesn't clobber the now-freed
1292 			 * memory
1293 			 */
1294 			if (thread == arch_current_thread() && arch_is_in_isr()) {
1295 				dummify = true;
1296 			}
1297 		}
1298 #ifdef CONFIG_SMP
1299 		unpend_all(&thread->halt_queue);
1300 #endif /* CONFIG_SMP */
1301 		update_cache(1);
1302 
1303 		if (new_state == _THREAD_SUSPENDED) {
1304 			clear_halting(thread);
1305 			return;
1306 		}
1307 
1308 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1309 		arch_float_disable(thread);
1310 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
1311 
1312 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1313 
1314 		z_thread_monitor_exit(thread);
1315 #ifdef CONFIG_THREAD_ABORT_HOOK
1316 		thread_abort_hook(thread);
1317 #endif /* CONFIG_THREAD_ABORT_HOOK */
1318 
1319 #ifdef CONFIG_OBJ_CORE_THREAD
1320 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
1321 		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1322 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
1323 		k_obj_core_unlink(K_OBJ_CORE(thread));
1324 #endif /* CONFIG_OBJ_CORE_THREAD */
1325 
1326 #ifdef CONFIG_USERSPACE
1327 		z_mem_domain_exit_thread(thread);
1328 		k_thread_perms_all_clear(thread);
1329 		k_object_uninit(thread->stack_obj);
1330 		k_object_uninit(thread);
1331 #endif /* CONFIG_USERSPACE */
1332 
1333 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1334 		k_thread_abort_cleanup(thread);
1335 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1336 
1337 		/* Do this "set arch_current_thread() to dummy" step last so that
1338 		 * subsystems above can rely on arch_current_thread() being
1339 		 * unchanged.  Disabled for posix as that arch
1340 		 * continues to use the arch_current_thread() pointer in its swap
1341 		 * code.  Note that we must leave a non-null switch
1342 		 * handle for any threads spinning in join() (this can
1343 		 * never be used, as our thread is flagged dead, but
1344 		 * it must not be NULL otherwise join can deadlock).
1345 		 */
1346 		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
1347 #ifdef CONFIG_USE_SWITCH
1348 			arch_current_thread()->switch_handle = arch_current_thread();
1349 #endif
1350 			z_dummy_thread_init(&_thread_dummy);
1351 
1352 		}
1353 
1354 		/* Finally update the halting thread state, on which
1355 		 * other CPUs might be spinning (see
1356 		 * thread_halt_spin()).
1357 		 */
1358 		clear_halting(thread);
1359 	}
1360 }
1361 
1362 void z_thread_abort(struct k_thread *thread)
1363 {
1364 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1365 
1366 	if (z_is_thread_essential(thread)) {
1367 		k_spin_unlock(&_sched_spinlock, key);
1368 		__ASSERT(false, "aborting essential thread %p", thread);
1369 		k_panic();
1370 		return;
1371 	}
1372 
1373 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1374 		k_spin_unlock(&_sched_spinlock, key);
1375 		return;
1376 	}
1377 
1378 	z_thread_halt(thread, key, true);
1379 }
1380 
1381 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1382 void z_impl_k_thread_abort(k_tid_t thread)
1383 {
1384 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1385 
1386 	z_thread_abort(thread);
1387 
1388 	__ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1389 
1390 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1391 }
1392 #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
1393 
1394 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1395 {
1396 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1397 	int ret;
1398 
1399 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1400 
1401 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1402 		z_sched_switch_spin(thread);
1403 		ret = 0;
1404 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1405 		ret = -EBUSY;
1406 	} else if ((thread == arch_current_thread()) ||
1407 		   (thread->base.pended_on == &arch_current_thread()->join_queue)) {
1408 		ret = -EDEADLK;
1409 	} else {
1410 		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1411 		add_to_waitq_locked(arch_current_thread(), &thread->join_queue);
1412 		add_thread_timeout(arch_current_thread(), timeout);
1413 
1414 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1415 		ret = z_swap(&_sched_spinlock, key);
1416 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1417 
1418 		return ret;
1419 	}
1420 
1421 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1422 
1423 	k_spin_unlock(&_sched_spinlock, key);
1424 	return ret;
1425 }
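/* Illustrative usage sketch (not kernel code): an application waits for
 * a thread to exit before reusing its stack:
 *
 *	int rc = k_thread_join(&worker_thread, K_SECONDS(1));
 *
 * "worker_thread" is a hypothetical thread object; per the code above,
 * rc is 0 once the target is dead, -EBUSY with K_NO_WAIT while it is
 * still running, and -EDEADLK for self or circular joins.
 */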
1426 
1427 #ifdef CONFIG_USERSPACE
1428 /* Special case: don't oops if the thread is uninitialized.  This is because
1429  * the initialization bit does double-duty for thread objects; if false, means
1430  * the thread object is truly uninitialized, or the thread ran and exited for
1431  * some reason.
1432  *
1433  * Return true in this case indicating we should just do nothing and return
1434  * success to the caller.
1435  */
1436 static bool thread_obj_validate(struct k_thread *thread)
1437 {
1438 	struct k_object *ko = k_object_find(thread);
1439 	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1440 
1441 	switch (ret) {
1442 	case 0:
1443 		return false;
1444 	case -EINVAL:
1445 		return true;
1446 	default:
1447 #ifdef CONFIG_LOG
1448 		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
1449 #endif /* CONFIG_LOG */
1450 		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
1451 	}
1452 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1453 }
1454 
1455 static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1456 				       k_timeout_t timeout)
1457 {
1458 	if (thread_obj_validate(thread)) {
1459 		return 0;
1460 	}
1461 
1462 	return z_impl_k_thread_join(thread, timeout);
1463 }
1464 #include <zephyr/syscalls/k_thread_join_mrsh.c>
1465 
1466 static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1467 {
1468 	if (thread_obj_validate(thread)) {
1469 		return;
1470 	}
1471 
1472 	K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
1473 				    "aborting essential thread %p", thread));
1474 
1475 	z_impl_k_thread_abort((struct k_thread *)thread);
1476 }
1477 #include <zephyr/syscalls/k_thread_abort_mrsh.c>
1478 #endif /* CONFIG_USERSPACE */
1479 
1480 /*
1481  * future scheduler.h API implementations
1482  */
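/* Wake the highest priority thread pending on wait_q, if any: its swap
 * return value/data are set, its timeout is aborted and it is made
 * ready.  Returns true if a thread was woken, false if the queue was
 * empty.
 */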
1483 bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1484 {
1485 	struct k_thread *thread;
1486 	bool ret = false;
1487 
1488 	K_SPINLOCK(&_sched_spinlock) {
1489 		thread = _priq_wait_best(&wait_q->waitq);
1490 
1491 		if (thread != NULL) {
1492 			z_thread_return_value_set_with_data(thread,
1493 							    swap_retval,
1494 							    swap_data);
1495 			unpend_thread_no_timeout(thread);
1496 			(void)z_abort_thread_timeout(thread);
1497 			ready_thread(thread);
1498 			ret = true;
1499 		}
1500 	}
1501 
1502 	return ret;
1503 }
1504 
1505 int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1506 		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1507 {
1508 	int ret = z_pend_curr(lock, key, wait_q, timeout);
1509 
1510 	if (data != NULL) {
1511 		*data = arch_current_thread()->base.swap_data;
1512 	}
1513 	return ret;
1514 }
1515 
1516 int z_sched_waitq_walk(_wait_q_t  *wait_q,
1517 		       int (*func)(struct k_thread *, void *), void *data)
1518 {
1519 	struct k_thread *thread;
1520 	int  status = 0;
1521 
1522 	K_SPINLOCK(&_sched_spinlock) {
1523 		_WAIT_Q_FOR_EACH(wait_q, thread) {
1524 
1525 			/*
1526 			 * Invoke the callback function on each waiting thread
1527 			 * for as long as there are both waiting threads AND
1528 			 * it returns 0.
1529 			 */
1530 
1531 			status = func(thread, data);
1532 			if (status != 0) {
1533 				break;
1534 			}
1535 		}
1536 	}
1537 
1538 	return status;
1539 }
1540 
1541 /* This routine exists for benchmarking purposes. It is not used in
1542  * general production code.
1543  */
1544 void z_unready_thread(struct k_thread *thread)
1545 {
1546 	K_SPINLOCK(&_sched_spinlock) {
1547 		unready_thread(thread);
1548 	}
1549 }
1550