1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <wait_q.h>
10 #include <kthread.h>
11 #include <priority_q.h>
12 #include <kswap.h>
13 #include <ipi.h>
14 #include <kernel_arch_func.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/drivers/timer/system_timer.h>
17 #include <stdbool.h>
18 #include <kernel_internal.h>
19 #include <zephyr/logging/log.h>
20 #include <zephyr/sys/atomic.h>
21 #include <zephyr/sys/math_extras.h>
22 #include <zephyr/timing/timing.h>
23 #include <zephyr/sys/util.h>
24 
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26 
27 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
28 extern struct k_thread *pending_current;
29 #endif
30 
31 struct k_spinlock _sched_spinlock;
32 
33 /* Storage to "complete" the context switch from an invalid/incomplete thread
34  * context (ex: exiting an ISR that aborted _current)
35  */
36 __incoherent struct k_thread _thread_dummy;
37 
38 static ALWAYS_INLINE void update_cache(int preempt_ok);
39 static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
41 
42 
43 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
44 	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
45 	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
46 	     "threads.");
47 
48 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
49 {
50 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
51 	int cpu, m = thread->base.cpu_mask;
52 
53 	/* Edge case: it's legal per the API to "make runnable" a
54 	 * thread with all CPUs masked off (i.e. one that isn't
55 	 * actually runnable!).  Sort of a wart in the API and maybe
56 	 * we should address this in docs/assertions instead to avoid
57 	 * the extra test.
58 	 */
59 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
60 
61 	return &_kernel.cpus[cpu].ready_q.runq;
62 #else
63 	ARG_UNUSED(thread);
64 	return &_kernel.ready_q.runq;
65 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
66 }
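
/* Usage sketch (illustrative; requires CONFIG_SCHED_CPU_MASK, and the
 * "worker" thread, stack and entry point are hypothetical): pinning a
 * not-yet-started thread to a single CPU is what lets thread_runq()
 * above map its cpu_mask onto one per-CPU run queue.
 *
 *	k_tid_t tid = k_thread_create(&worker, worker_stack,
 *				      K_THREAD_STACK_SIZEOF(worker_stack),
 *				      worker_fn, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_FOREVER);
 *
 *	k_thread_cpu_pin(tid, 1);
 *	k_thread_start(tid);
 */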
67 
68 static ALWAYS_INLINE void *curr_cpu_runq(void)
69 {
70 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
71 	return &arch_curr_cpu()->ready_q.runq;
72 #else
73 	return &_kernel.ready_q.runq;
74 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
75 }
76 
77 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
78 {
79 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
80 	__ASSERT_NO_MSG(!is_thread_dummy(thread));
81 
82 	_priq_run_add(thread_runq(thread), thread);
83 }
84 
85 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
86 {
87 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
88 	__ASSERT_NO_MSG(!is_thread_dummy(thread));
89 
90 	_priq_run_remove(thread_runq(thread), thread);
91 }
92 
93 static ALWAYS_INLINE void runq_yield(void)
94 {
95 	_priq_run_yield(curr_cpu_runq());
96 }
97 
98 static ALWAYS_INLINE struct k_thread *runq_best(void)
99 {
100 	return _priq_run_best(curr_cpu_runq());
101 }
102 
103 /* _current is never in the run queue until context switch on
104  * SMP configurations, see z_requeue_current()
105  */
106 static inline bool should_queue_thread(struct k_thread *thread)
107 {
108 	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
109 }
110 
111 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
112 {
113 	z_mark_thread_as_queued(thread);
114 	if (should_queue_thread(thread)) {
115 		runq_add(thread);
116 	}
117 #ifdef CONFIG_SMP
118 	if (thread == _current) {
119 		/* adding _current to the end of the queue means "yield" */
120 		_current_cpu->swap_ok = true;
121 	}
122 #endif /* CONFIG_SMP */
123 }
124 
125 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
126 {
127 	z_mark_thread_as_not_queued(thread);
128 	if (should_queue_thread(thread)) {
129 		runq_remove(thread);
130 	}
131 }
132 
133 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
134  * never live in the run queue until we are inexorably on the context
135  * switch path on SMP; otherwise there is a deadlock condition where a
136  * set of CPUs pick a cycle of threads to run and wait for them all to
137  * context switch forever.
138  */
139 void z_requeue_current(struct k_thread *thread)
140 {
141 	if (z_is_thread_queued(thread)) {
142 		runq_add(thread);
143 	}
144 	signal_pending_ipi();
145 }
146 
147 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
148 static inline void clear_halting(struct k_thread *thread)
149 {
150 	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
151 		barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
152 		thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
153 	}
154 }
155 
156 /* Track cooperative threads preempted by metairqs so we can return to
157  * them specifically.  Called at the moment a new thread has been
158  * selected to run.
159  */
160 static void update_metairq_preempt(struct k_thread *thread)
161 {
162 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0)
163 	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
164 	    !thread_is_preemptible(_current)) {
165 		/* Record new preemption */
166 		_current_cpu->metairq_preempted = _current;
167 	} else if (!thread_is_metairq(thread)) {
168 		/* Returning from existing preemption */
169 		_current_cpu->metairq_preempted = NULL;
170 	}
171 #else
172 	ARG_UNUSED(thread);
173 #endif /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 */
174 }
175 
176 static ALWAYS_INLINE struct k_thread *next_up(void)
177 {
178 #ifdef CONFIG_SMP
179 	if (z_is_thread_halting(_current)) {
180 		halt_thread(_current, z_is_thread_aborting(_current) ?
181 				      _THREAD_DEAD : _THREAD_SUSPENDED);
182 	}
183 #endif /* CONFIG_SMP */
184 
185 	struct k_thread *thread = runq_best();
186 
187 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0)
188 	/* MetaIRQs must always attempt to return to a
189 	 * cooperative thread they preempted and not whatever happens
190 	 * to be highest priority now. The cooperative thread was
191 	 * promised it wouldn't be preempted (by non-metairq threads)!
192 	 */
193 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
194 
195 	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
196 		if (z_is_thread_ready(mirqp)) {
197 			thread = mirqp;
198 		} else {
199 			_current_cpu->metairq_preempted = NULL;
200 		}
201 	}
202 #endif /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 */
203 
204 #ifndef CONFIG_SMP
205 	/* In uniprocessor mode, we can leave the current thread in
206 	 * the queue (actually we have to, otherwise the assembly
207 	 * context switch code for all architectures would be
208 	 * responsible for putting it back in z_swap and ISR return!),
209 	 * which makes this choice simple.
210 	 */
211 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
212 #else
213 	/* Under SMP, the "cache" mechanism for selecting the next
214 	 * thread doesn't work, so we have more work to do to test
215 	 * _current against the best choice from the queue.  Here, the
216 	 * thread selected above represents "the best thread that is
217 	 * not current".
218 	 *
219 	 * Subtle note on "queued": in SMP mode, neither _current nor
220 	 * metairq_preempted live in the queue, so this isn't exactly the
221 	 * same thing as "ready"; it means "the thread has already been
222 	 * added back to the queue, such that we don't want to re-add it".
223 	 */
224 	bool queued = z_is_thread_queued(_current);
225 	bool active = z_is_thread_ready(_current);
226 
227 	if (thread == NULL) {
228 		thread = _current_cpu->idle_thread;
229 	}
230 
231 	if (active) {
232 		int32_t cmp = z_sched_prio_cmp(_current, thread);
233 
234 		/* Ties only switch if state says we yielded */
235 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
236 			thread = _current;
237 		}
238 
239 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
240 			thread = _current;
241 		}
242 	}
243 
244 	if (thread != _current) {
245 		update_metairq_preempt(thread);
246 		/*
247 		 * Put _current back into the queue unless it is ..
248 		 * 1. not active (i.e., blocked, suspended, dead), or
249 		 * 2. already queued, or
250 		 * 3. the idle thread, or
251 		 * 4. preempted by a MetaIRQ thread
252 		 */
253 		if (active && !queued && !z_is_idle_thread_object(_current)
254 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0)
255 		    && (_current != _current_cpu->metairq_preempted)
256 #endif
257 		   ) {
258 			queue_thread(_current);
259 		}
260 	}
261 
262 	/* Take the new _current out of the queue */
263 	if (z_is_thread_queued(thread)) {
264 		dequeue_thread(thread);
265 	}
266 
267 	_current_cpu->swap_ok = false;
268 	return thread;
269 #endif /* CONFIG_SMP */
270 }
271 
272 void move_current_to_end_of_prio_q(void)
273 {
274 	runq_yield();
275 
276 	update_cache(1);
277 }
278 
279 static ALWAYS_INLINE void update_cache(int preempt_ok)
280 {
281 #ifndef CONFIG_SMP
282 	struct k_thread *thread = next_up();
283 
284 	if (should_preempt(thread, preempt_ok)) {
285 #ifdef CONFIG_TIMESLICING
286 		if (thread != _current) {
287 			z_reset_time_slice(thread);
288 		}
289 #endif /* CONFIG_TIMESLICING */
290 		update_metairq_preempt(thread);
291 		_kernel.ready_q.cache = thread;
292 	} else {
293 		_kernel.ready_q.cache = _current;
294 	}
295 
296 #else
297 	/* The way this works is that the CPU record keeps its
298 	 * "cooperative swapping is OK" flag until the next reschedule
299 	 * call or context switch.  It doesn't need to be tracked per
300 	 * thread because if the thread gets preempted for whatever
301 	 * reason the scheduler will make the same decision anyway.
302 	 */
303 	_current_cpu->swap_ok = preempt_ok;
304 #endif /* CONFIG_SMP */
305 }
306 
307 static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
308 {
309 	/* Returns pointer to _cpu if the thread is currently running on
310 	 * another CPU. There are more scalable designs to answer this
311 	 * question in constant time, but this is fine for now.
312 	 */
313 #ifdef CONFIG_SMP
314 	int currcpu = _current_cpu->id;
315 
316 	unsigned int num_cpus = arch_num_cpus();
317 
318 	for (int i = 0; i < num_cpus; i++) {
319 		if ((i != currcpu) &&
320 		    (_kernel.cpus[i].current == thread)) {
321 			return &_kernel.cpus[i];
322 		}
323 	}
324 #endif /* CONFIG_SMP */
325 	ARG_UNUSED(thread);
326 	return NULL;
327 }
328 
329 static void ready_thread(struct k_thread *thread)
330 {
331 #ifdef CONFIG_KERNEL_COHERENCE
332 	__ASSERT_NO_MSG(sys_cache_is_mem_coherent(thread));
333 #endif /* CONFIG_KERNEL_COHERENCE */
334 
335 	/* If the thread is already queued, do not try to add it to the
336 	 * run queue again
337 	 */
338 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
339 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
340 
341 		queue_thread(thread);
342 		update_cache(0);
343 
344 		flag_ipi(ipi_mask_create(thread));
345 	}
346 }
347 
348 void z_ready_thread(struct k_thread *thread)
349 {
350 	K_SPINLOCK(&_sched_spinlock) {
351 		if (thread_active_elsewhere(thread) == NULL) {
352 			ready_thread(thread);
353 		}
354 	}
355 }
356 
357 /* This routine is only used for testing purposes */
358 void z_yield_testing_only(void)
359 {
360 	K_SPINLOCK(&_sched_spinlock) {
361 		move_current_to_end_of_prio_q();
362 	}
363 }
364 
365 /* Spins in ISR context, waiting for a thread known to be running on
366  * another CPU to catch the IPI we sent and halt.  Note that we check
367  * for ourselves being asynchronously halted first to prevent simple
368  * deadlocks (but not complex ones involving cycles of 3+ threads!).
369  * Acts to release the provided lock before returning.
370  */
371 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
372 {
373 	if (z_is_thread_halting(_current)) {
374 		halt_thread(_current,
375 			    z_is_thread_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
376 	}
377 	k_spin_unlock(&_sched_spinlock, key);
378 	while (z_is_thread_halting(thread)) {
379 		unsigned int k = arch_irq_lock();
380 
381 		arch_spin_relax(); /* Requires interrupts be masked */
382 		arch_irq_unlock(k);
383 	}
384 }
385 
386 /**
387  * If the specified thread is recorded as being preempted by a meta IRQ thread,
388  * clear that record.
389  */
390 static ALWAYS_INLINE void z_metairq_preempted_clear(struct k_thread *thread)
391 {
392 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0)
393 	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
394 		if (_kernel.cpus[i].metairq_preempted == thread) {
395 			_kernel.cpus[i].metairq_preempted = NULL;
396 			break;
397 		}
398 	}
399 #endif
400 }
401 
402 /* Shared handler for k_thread_{suspend,abort}().  Called with the
403  * scheduler lock held and the passed key (which it may release and
404  * reacquire!).  The lock will be released before any possible return
405  * (aborting _current will not return, obviously), which may happen
406  * after a context switch.
407  */
408 static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
409 					bool terminate)
410 {
411 	_wait_q_t *wq = &thread->join_queue;
412 #ifdef CONFIG_SMP
413 	wq = terminate ? wq : &thread->halt_queue;
414 #endif
415 
416 	z_metairq_preempted_clear(thread);
417 
418 	/* If the target is a thread running on another CPU, flag and
419 	 * poke it (note that we might spin to wait, so a true
420 	 * synchronous IPI is needed here, not a deferred one!); it will
421 	 * halt itself in the IPI handler.  Otherwise it's unscheduled,
422 	 * so we can clean it up directly.
423 	 */
424 
425 	struct _cpu *cpu = thread_active_elsewhere(thread);
426 
427 	if (cpu != NULL) {
428 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
429 					      : _THREAD_SUSPENDING);
430 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
431 #ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
432 		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
433 #else
434 		arch_sched_broadcast_ipi();
435 #endif /* CONFIG_ARCH_HAS_DIRECTED_IPIS */
436 #endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
437 		if (arch_is_in_isr()) {
438 			thread_halt_spin(thread, key);
439 		} else  {
440 			add_to_waitq_locked(_current, wq);
441 			z_swap(&_sched_spinlock, key);
442 		}
443 	} else {
444 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
445 		if ((thread == _current) && !arch_is_in_isr()) {
446 			if (z_is_thread_essential(thread)) {
447 				k_spin_unlock(&_sched_spinlock, key);
448 				k_panic();
449 				key = k_spin_lock(&_sched_spinlock);
450 			}
451 			z_swap(&_sched_spinlock, key);
452 			__ASSERT(!terminate, "aborted _current back from dead");
453 		} else {
454 			k_spin_unlock(&_sched_spinlock, key);
455 		}
456 	}
457 	/* NOTE: the scheduler lock has been released.  Don't put
458 	 * logic here, it's likely to be racy/deadlocky even if you
459 	 * re-take the lock!
460 	 */
461 }
462 
463 
464 void z_impl_k_thread_suspend(k_tid_t thread)
465 {
466 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
467 
468 	/* Special case "suspend the current thread" as it doesn't
469 	 * need the async complexity below.
470 	 */
471 	if (!IS_ENABLED(CONFIG_SMP) && (thread == _current) && !arch_is_in_isr()) {
472 		k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
473 
474 		z_mark_thread_as_suspended(thread);
475 		z_metairq_preempted_clear(thread);
476 		dequeue_thread(thread);
477 		update_cache(1);
478 		z_swap(&_sched_spinlock, key);
479 		return;
480 	}
481 
482 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
483 
484 	if (unlikely(z_is_thread_suspended(thread))) {
485 
486 		/* The target thread is already suspended. Nothing to do. */
487 
488 		k_spin_unlock(&_sched_spinlock, key);
489 		return;
490 	}
491 
492 	z_thread_halt(thread, key, false);
493 
494 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
495 }
496 
497 #ifdef CONFIG_USERSPACE
498 static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
499 {
500 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
501 	z_impl_k_thread_suspend(thread);
502 }
503 #include <zephyr/syscalls/k_thread_suspend_mrsh.c>
504 #endif /* CONFIG_USERSPACE */
505 
506 void z_impl_k_thread_resume(k_tid_t thread)
507 {
508 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
509 
510 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
511 
512 	/* Do not try to resume a thread that was not suspended */
513 	if (unlikely(!z_is_thread_suspended(thread))) {
514 		k_spin_unlock(&_sched_spinlock, key);
515 		return;
516 	}
517 
518 	z_mark_thread_as_not_suspended(thread);
519 	ready_thread(thread);
520 
521 	z_reschedule(&_sched_spinlock, key);
522 
523 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
524 }
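
/* Usage sketch (illustrative; the "worker" thread is hypothetical): a
 * suspended thread stays off the ready queue until another context
 * resumes it, at which point ready_thread() above makes it schedulable
 * again.
 *
 *	k_thread_suspend(&worker);
 *	...
 *	k_thread_resume(&worker);
 */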
525 
526 #ifdef CONFIG_USERSPACE
527 static inline void z_vrfy_k_thread_resume(k_tid_t thread)
528 {
529 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
530 	z_impl_k_thread_resume(thread);
531 }
532 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
533 #endif /* CONFIG_USERSPACE */
534 
535 static void unready_thread(struct k_thread *thread)
536 {
537 	if (z_is_thread_queued(thread)) {
538 		dequeue_thread(thread);
539 	}
540 	update_cache(thread == _current);
541 }
542 
543 /* _sched_spinlock must be held */
544 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
545 {
546 	unready_thread(thread);
547 	z_mark_thread_as_pending(thread);
548 
549 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
550 
551 	if (wait_q != NULL) {
552 		thread->base.pended_on = wait_q;
553 		_priq_wait_add(&wait_q->waitq, thread);
554 	}
555 }
556 
557 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
558 {
559 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
560 		z_add_thread_timeout(thread, timeout);
561 	}
562 }
563 
564 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
565 			k_timeout_t timeout)
566 {
567 #ifdef CONFIG_KERNEL_COHERENCE
568 	__ASSERT_NO_MSG(wait_q == NULL || sys_cache_is_mem_coherent(wait_q));
569 #endif /* CONFIG_KERNEL_COHERENCE */
570 	add_to_waitq_locked(thread, wait_q);
571 	add_thread_timeout(thread, timeout);
572 }
573 
574 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
575 		   k_timeout_t timeout)
576 {
577 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
578 	K_SPINLOCK(&_sched_spinlock) {
579 		pend_locked(thread, wait_q, timeout);
580 	}
581 }
582 
583 void z_unpend_thread_no_timeout(struct k_thread *thread)
584 {
585 	K_SPINLOCK(&_sched_spinlock) {
586 		if (thread->base.pended_on != NULL) {
587 			unpend_thread_no_timeout(thread);
588 		}
589 	}
590 }
591 
592 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
593 {
594 	K_SPINLOCK(&_sched_spinlock) {
595 		bool killed = (thread->base.thread_state &
596 				(_THREAD_DEAD | _THREAD_ABORTING));
597 
598 #ifdef CONFIG_EVENTS
599 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
600 
601 		thread->no_wake_on_timeout = false;
602 
603 		if (do_nothing) {
604 			continue;
605 		}
606 #endif /* CONFIG_EVENTS */
607 
608 		if (!killed) {
609 			/* The thread is not being killed */
610 			if (thread->base.pended_on != NULL) {
611 				unpend_thread_no_timeout(thread);
612 			}
613 			z_mark_thread_as_not_sleeping(thread);
614 			ready_thread(thread);
615 		}
616 	}
617 
618 }
619 
620 #ifdef CONFIG_SYS_CLOCK_EXISTS
621 /* Timeout handler for *_thread_timeout() APIs */
622 void z_thread_timeout(struct _timeout *timeout)
623 {
624 	struct k_thread *thread = CONTAINER_OF(timeout,
625 					       struct k_thread, base.timeout);
626 
627 	z_sched_wake_thread(thread, true);
628 }
629 #endif /* CONFIG_SYS_CLOCK_EXISTS */
630 
631 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
632 	       _wait_q_t *wait_q, k_timeout_t timeout)
633 {
634 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
635 	pending_current = _current;
636 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
637 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
638 
639 	/* We do a "lock swap" prior to calling z_swap(), such that
640 	 * the caller's lock gets released as desired.  But we ensure
641 	 * that we hold the scheduler lock and leave local interrupts
642 	 * masked until we reach the context switch.  z_swap() itself
643 	 * has similar code; the duplication is because it's a legacy
644 	 * API that doesn't expect to be called with scheduler lock
645 	 * held.
646 	 */
647 	(void) k_spin_lock(&_sched_spinlock);
648 	pend_locked(_current, wait_q, timeout);
649 	k_spin_release(lock);
650 	return z_swap(&_sched_spinlock, key);
651 }
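
/* Illustrative calling pattern (the object, its fields and the counting
 * semantics are hypothetical): a blocking kernel primitive holds its own
 * spinlock, decides it must wait, and hands that lock plus its wait
 * queue to z_pend_curr(), which performs the lock swap described above
 * and returns z_swap()'s result (0 on wakeup, typically -EAGAIN on
 * timeout).
 *
 *	k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *
 *	if (obj->count == 0) {
 *		return z_pend_curr(&obj->lock, key, &obj->wait_q, timeout);
 *	}
 *	obj->count--;
 *	k_spin_unlock(&obj->lock, key);
 *	return 0;
 */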
652 
653 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
654 {
655 	struct k_thread *thread = NULL;
656 
657 	K_SPINLOCK(&_sched_spinlock) {
658 		thread = _priq_wait_best(&wait_q->waitq);
659 
660 		if (thread != NULL) {
661 			unpend_thread_no_timeout(thread);
662 		}
663 	}
664 
665 	return thread;
666 }
667 
668 void z_unpend_thread(struct k_thread *thread)
669 {
670 	z_unpend_thread_no_timeout(thread);
671 	z_abort_thread_timeout(thread);
672 }
673 
674 /* Priority set utility that does no rescheduling; it just changes the
675  * run queue state, returning true if a reschedule is needed later.
676  */
677 bool z_thread_prio_set(struct k_thread *thread, int prio)
678 {
679 	bool need_sched = 0;
680 	int old_prio = thread->base.prio;
681 
682 	K_SPINLOCK(&_sched_spinlock) {
683 		need_sched = z_is_thread_ready(thread);
684 
685 		if (need_sched) {
686 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
687 				dequeue_thread(thread);
688 				thread->base.prio = prio;
689 				queue_thread(thread);
690 
691 				if (old_prio > prio) {
692 					flag_ipi(ipi_mask_create(thread));
693 				}
694 			} else {
695 				/*
696 				 * This is a running thread on SMP. Update its
697 				 * priority, but do not requeue it. An IPI is
698 				 * needed if the priority is both being lowered
699 				 * and it is running on another CPU.
700 				 */
701 
702 				thread->base.prio = prio;
703 
704 				struct _cpu *cpu;
705 
706 				cpu = thread_active_elsewhere(thread);
707 				if ((cpu != NULL) && (old_prio < prio)) {
708 					flag_ipi(IPI_CPU_MASK(cpu->id));
709 				}
710 			}
711 
712 			update_cache(1);
713 		} else if (z_is_thread_pending(thread)) {
714 			/* Thread is pending, remove it from the waitq
715 			 * and reinsert it with the new priority to avoid
716 			 * violating waitq ordering and rb assumptions.
717 			 */
718 			_wait_q_t *wait_q = pended_on_thread(thread);
719 
720 			_priq_wait_remove(&wait_q->waitq, thread);
721 			thread->base.prio = prio;
722 			_priq_wait_add(&wait_q->waitq, thread);
723 		} else {
724 			thread->base.prio = prio;
725 		}
726 	}
727 
728 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
729 
730 	return need_sched;
731 }
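
/* Typical caller pattern (sketch): the return value defers the actual
 * reschedule decision to the caller, as z_impl_k_thread_priority_set()
 * below does.
 *
 *	if (z_thread_prio_set(thread, prio)) {
 *		z_reschedule_unlocked();
 *	}
 */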
732 
733 static inline bool resched(uint32_t key)
734 {
735 #ifdef CONFIG_SMP
736 	_current_cpu->swap_ok = 0;
737 #endif /* CONFIG_SMP */
738 
739 	return arch_irq_unlocked(key) && !arch_is_in_isr();
740 }
741 
742 /*
743  * Check if the next ready thread is the same as the current thread
744  * and save the trip if true.
745  */
746 static inline bool need_swap(void)
747 {
748 	/* the SMP case will be handled in the C-based z_swap() */
749 #ifdef CONFIG_SMP
750 	return true;
751 #else
752 	struct k_thread *new_thread;
753 
754 	/* Check if the next ready thread is the same as the current thread */
755 	new_thread = _kernel.ready_q.cache;
756 	return new_thread != _current;
757 #endif /* CONFIG_SMP */
758 }
759 
760 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
761 {
762 	if (resched(key.key) && need_swap()) {
763 		z_swap(lock, key);
764 	} else {
765 		k_spin_unlock(lock, key);
766 		signal_pending_ipi();
767 	}
768 }
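
/* Usage sketch (illustrative; "obj" is hypothetical): a typical
 * "give"/"signal" path readies a waiter under its own lock and then lets
 * z_reschedule() decide whether a context switch is actually needed,
 * releasing the lock either way.
 *
 *	k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *
 *	if (z_sched_wake(&obj->wait_q, 0, NULL)) {
 *		z_reschedule(&obj->lock, key);
 *	} else {
 *		k_spin_unlock(&obj->lock, key);
 *	}
 */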
769 
770 void z_reschedule_irqlock(uint32_t key)
771 {
772 	if (resched(key) && need_swap()) {
773 		z_swap_irqlock(key);
774 	} else {
775 		irq_unlock(key);
776 		signal_pending_ipi();
777 	}
778 }
779 
780 void k_sched_lock(void)
781 {
782 	LOG_DBG("scheduler locked (%p:%d)",
783 		_current, _current->base.sched_locked);
784 
785 	K_SPINLOCK(&_sched_spinlock) {
786 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
787 
788 		__ASSERT(!arch_is_in_isr(), "");
789 		__ASSERT(_current->base.sched_locked != 1U, "");
790 
791 		--_current->base.sched_locked;
792 
793 		compiler_barrier();
794 	}
795 }
796 
797 void k_sched_unlock(void)
798 {
799 	K_SPINLOCK(&_sched_spinlock) {
800 		__ASSERT(_current->base.sched_locked != 0U, "");
801 		__ASSERT(!arch_is_in_isr(), "");
802 
803 		++_current->base.sched_locked;
804 		update_cache(0);
805 	}
806 
807 	LOG_DBG("scheduler unlocked (%p:%d)",
808 		_current, _current->base.sched_locked);
809 
810 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
811 
812 	z_reschedule_unlocked();
813 }
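
/* Usage sketch (illustrative; update_shared_state() is hypothetical):
 * a k_sched_lock()/k_sched_unlock() pair gives a preemptible thread a
 * cheap critical section against other threads without masking
 * interrupts (it does not protect against ISRs).
 *
 *	k_sched_lock();
 *	update_shared_state();
 *	k_sched_unlock();
 */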
814 
815 struct k_thread *z_swap_next_thread(void)
816 {
817 #ifdef CONFIG_SMP
818 	struct k_thread *ret = next_up();
819 
820 	if (ret == _current) {
821 		/* When not swapping, have to signal IPIs here.  In
822 		 * the context switch case it must happen later, after
823 		 * _current gets requeued.
824 		 */
825 		signal_pending_ipi();
826 	}
827 	return ret;
828 #else
829 	return _kernel.ready_q.cache;
830 #endif /* CONFIG_SMP */
831 }
832 
833 #ifdef CONFIG_USE_SWITCH
834 /* Just a wrapper around z_current_thread_set(xxx) with tracing */
835 static inline void set_current(struct k_thread *new_thread)
836 {
837 	/* If the new thread is the same as the current thread, we
838 	 * don't need to do anything.
839 	 */
840 	if (IS_ENABLED(CONFIG_INSTRUMENT_THREAD_SWITCHING) && new_thread != _current) {
841 		z_thread_mark_switched_out();
842 	}
843 	z_current_thread_set(new_thread);
844 }
845 
846 /**
847  * @brief Determine next thread to execute upon completion of an interrupt
848  *
849  * Thread preemption is performed by context switching after the completion
850  * of a non-recursed interrupt. This function determines which thread to
851  * switch to if any. This function accepts as @p interrupted either:
852  *
853  * - The handle for the interrupted thread in which case the thread's context
854  *   must already be fully saved and ready to be picked up by a different CPU.
855  *
856  * - NULL if more work is required to fully save the thread's state after
857  *   it is known that a new thread is to be scheduled. It is up to the caller
858  *   to store the handle resulting from the thread that is being switched out
859  *   in that thread's "switch_handle" field after its
860  *   context has fully been saved, following the same requirements as with
861  *   the @ref arch_switch() function.
862  *
863  * If a new thread needs to be scheduled then its handle is returned.
864  * Otherwise the same value provided as @p interrupted is returned back.
865  * Those handles are the same opaque types used by the @ref arch_switch()
866  * function.
867  *
868  * @warning
869  * The _current value may have changed after this call and not refer
870  * to the interrupted thread anymore. It might be necessary to make a local
871  * copy before calling this function.
872  *
873  * @param interrupted Handle for the thread that was interrupted or NULL.
874  * @retval Handle for the next thread to execute, or @p interrupted when
875  *         no new thread is to be scheduled.
876  */
877 void *z_get_next_switch_handle(void *interrupted)
878 {
879 	z_check_stack_sentinel();
880 
881 #ifdef CONFIG_SMP
882 	void *ret = NULL;
883 
884 	K_SPINLOCK(&_sched_spinlock) {
885 		struct k_thread *old_thread = _current, *new_thread;
886 
887 		__ASSERT(old_thread->switch_handle == NULL || is_thread_dummy(old_thread),
888 			"old thread handle should be null.");
889 
890 		new_thread = next_up();
891 
892 		z_sched_usage_switch(new_thread);
893 
894 		if (old_thread != new_thread) {
895 			uint8_t  cpu_id;
896 
897 			z_sched_switch_spin(new_thread);
898 			arch_cohere_stacks(old_thread, interrupted, new_thread);
899 
900 			_current_cpu->swap_ok = 0;
901 			cpu_id = arch_curr_cpu()->id;
902 			new_thread->base.cpu = cpu_id;
903 			set_current(new_thread);
904 
905 #ifdef CONFIG_TIMESLICING
906 			z_reset_time_slice(new_thread);
907 #endif /* CONFIG_TIMESLICING */
908 
909 #ifdef CONFIG_SPIN_VALIDATE
910 			/* Changed _current!  Update the spinlock
911 			 * bookkeeping so the validation doesn't get
912 			 * confused when the "wrong" thread tries to
913 			 * release the lock.
914 			 */
915 			z_spin_lock_set_owner(&_sched_spinlock);
916 #endif /* CONFIG_SPIN_VALIDATE */
917 
918 			/* A queued (runnable) old/current thread
919 			 * needs to be added back to the run queue
920 			 * here, and atomically with its switch handle
921 			 * being set below.  This is safe now, as we
922 			 * will not return into it.
923 			 */
924 			if (z_is_thread_queued(old_thread)) {
925 #ifdef CONFIG_SCHED_IPI_CASCADE
926 				if ((new_thread->base.cpu_mask != -1) &&
927 				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
928 					flag_ipi(ipi_mask_create(old_thread));
929 				}
930 #endif
931 				runq_add(old_thread);
932 			}
933 		}
934 		old_thread->switch_handle = interrupted;
935 		ret = new_thread->switch_handle;
936 		/* Active threads MUST have a null here */
937 		new_thread->switch_handle = NULL;
938 	}
939 	signal_pending_ipi();
940 	return ret;
941 #else
942 	z_sched_usage_switch(_kernel.ready_q.cache);
943 	_current->switch_handle = interrupted;
944 	set_current(_kernel.ready_q.cache);
945 	return _current->switch_handle;
946 #endif /* CONFIG_SMP */
947 }
948 #endif /* CONFIG_USE_SWITCH */
949 
950 int z_unpend_all(_wait_q_t *wait_q)
951 {
952 	int need_sched = 0;
953 	struct k_thread *thread;
954 
955 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
956 		z_unpend_thread(thread);
957 		z_ready_thread(thread);
958 		need_sched = 1;
959 	}
960 
961 	return need_sched;
962 }
963 
964 void init_ready_q(struct _ready_q *ready_q)
965 {
966 	_priq_run_init(&ready_q->runq);
967 }
968 
969 void z_sched_init(void)
970 {
971 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
972 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
973 		init_ready_q(&_kernel.cpus[i].ready_q);
974 	}
975 #else
976 	init_ready_q(&_kernel.ready_q);
977 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
978 }
979 
980 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
981 {
982 	/*
983 	 * Use NULL, since we cannot know what the entry point is (we do not
984 	 * keep track of it) and idle cannot change its priority.
985 	 */
986 	Z_ASSERT_VALID_PRIO(prio, NULL);
987 
988 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
989 
990 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
991 			     (_current->base.sched_locked == 0U))) {
992 		z_reschedule_unlocked();
993 	}
994 }
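
/* Usage sketch (illustrative; "tid" is hypothetical): numerically larger
 * values are lower priority in Zephyr, so this moves the thread to a
 * less favorable preemptible priority.
 *
 *	k_thread_priority_set(tid, K_PRIO_PREEMPT(10));
 */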
995 
996 #ifdef CONFIG_USERSPACE
997 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
998 {
999 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1000 	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
1001 				    "invalid thread priority %d", prio));
1002 #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
1003 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
1004 				    "thread priority may only be downgraded (%d < %d)",
1005 				    prio, thread->base.prio));
1006 #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
1007 	z_impl_k_thread_priority_set(thread, prio);
1008 }
1009 #include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
1010 #endif /* CONFIG_USERSPACE */
1011 
1012 #ifdef CONFIG_SCHED_DEADLINE
1013 void z_impl_k_thread_absolute_deadline_set(k_tid_t tid, int deadline)
1014 {
1015 	struct k_thread *thread = tid;
1016 
1017 	/* The prio_deadline field changes the sorting order, so we can't
1018 	 * change it while the thread is in the run queue (dlists
1019 	 * actually are benign as long as we requeue it before we
1020 	 * release the lock, but an rbtree will blow up if we break
1021 	 * sorting!)
1022 	 */
1023 	K_SPINLOCK(&_sched_spinlock) {
1024 		if (z_is_thread_queued(thread)) {
1025 			dequeue_thread(thread);
1026 			thread->base.prio_deadline = deadline;
1027 			queue_thread(thread);
1028 		} else {
1029 			thread->base.prio_deadline = deadline;
1030 		}
1031 	}
1032 }
1033 
1034 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1035 {
1036 
1037 	deadline = CLAMP(deadline, 0, INT_MAX);
1038 
1039 	int32_t newdl = k_cycle_get_32() + deadline;
1040 
1041 	z_impl_k_thread_absolute_deadline_set(tid, newdl);
1042 }
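
/* Usage sketch (illustrative; "tid" is hypothetical): deadlines are
 * expressed in hardware cycles relative to "now", so callers usually
 * convert from time units first.
 *
 *	k_thread_deadline_set(tid, k_ms_to_cyc_ceil32(5));
 */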
1043 
1044 #ifdef CONFIG_USERSPACE
1045 static inline void z_vrfy_k_thread_absolute_deadline_set(k_tid_t tid, int deadline)
1046 {
1047 	struct k_thread *thread = tid;
1048 
1049 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1050 
1051 	z_impl_k_thread_absolute_deadline_set((k_tid_t)thread, deadline);
1052 }
1053 #include <zephyr/syscalls/k_thread_absolute_deadline_set_mrsh.c>
1054 
1055 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1056 {
1057 	struct k_thread *thread = tid;
1058 
1059 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1060 	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1061 				    "invalid thread deadline %d",
1062 				    (int)deadline));
1063 
1064 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1065 }
1066 #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
1067 #endif /* CONFIG_USERSPACE */
1068 #endif /* CONFIG_SCHED_DEADLINE */
1069 
1070 void z_impl_k_reschedule(void)
1071 {
1072 	k_spinlock_key_t key;
1073 
1074 	key = k_spin_lock(&_sched_spinlock);
1075 
1076 	update_cache(0);
1077 
1078 	z_reschedule(&_sched_spinlock, key);
1079 }
1080 
1081 #ifdef CONFIG_USERSPACE
1082 static inline void z_vrfy_k_reschedule(void)
1083 {
1084 	z_impl_k_reschedule();
1085 }
1086 #include <zephyr/syscalls/k_reschedule_mrsh.c>
1087 #endif /* CONFIG_USERSPACE */
1088 
1089 bool k_can_yield(void)
1090 {
1091 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1092 		 z_is_idle_thread_object(_current));
1093 }
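
/* Usage sketch: long-running loops commonly use this guard so the same
 * code also works in ISR, idle, or pre-kernel contexts where yielding is
 * not allowed.
 *
 *	if (k_can_yield()) {
 *		k_yield();
 *	}
 */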
1094 
1095 void z_impl_k_yield(void)
1096 {
1097 	__ASSERT(!arch_is_in_isr(), "");
1098 
1099 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1100 
1101 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1102 
1103 	runq_yield();
1104 
1105 	update_cache(1);
1106 	z_swap(&_sched_spinlock, key);
1107 }
1108 
1109 #ifdef CONFIG_USERSPACE
1110 static inline void z_vrfy_k_yield(void)
1111 {
1112 	z_impl_k_yield();
1113 }
1114 #include <zephyr/syscalls/k_yield_mrsh.c>
1115 #endif /* CONFIG_USERSPACE */
1116 
1117 static int32_t z_tick_sleep(k_timeout_t timeout)
1118 {
1119 	uint32_t expected_wakeup_ticks;
1120 
1121 	__ASSERT(!arch_is_in_isr(), "");
1122 
1123 	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)timeout.ticks);
1124 
1125 	/* K_NO_WAIT is treated as a 'yield' */
1126 	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1127 		k_yield();
1128 		return 0;
1129 	}
1130 
1131 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1132 
1133 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1134 	pending_current = _current;
1135 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
1136 	unready_thread(_current);
1137 	expected_wakeup_ticks = (uint32_t)z_add_thread_timeout(_current, timeout);
1138 	z_mark_thread_as_sleeping(_current);
1139 
1140 	(void)z_swap(&_sched_spinlock, key);
1141 
1142 	if (!z_is_aborted_thread_timeout(_current)) {
1143 		return 0;
1144 	}
1145 
1146 	/* We rely on 32 bit unsigned subtraction to handle wraparound */
1147 	uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32();
1148 
1149 	/* To handle a negative value correctly, type-cast it to signed 32 bit */
1150 	k_ticks_t ticks = (k_ticks_t)(int32_t)left_ticks;
1151 
1152 	if (ticks > 0) {
1153 		return ticks;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 int32_t z_impl_k_sleep(k_timeout_t timeout)
1160 {
1161 	k_ticks_t ticks;
1162 
1163 	__ASSERT(!arch_is_in_isr(), "");
1164 
1165 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1166 
1167 	ticks = z_tick_sleep(timeout);
1168 
1169 	/* k_sleep() still returns 32 bit milliseconds for compatibility */
1170 	int64_t ms = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
1171 		CLAMP(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
1172 
1173 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ms);
1174 	return (int32_t) ms;
1175 }
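
/* Usage sketch: the return value is the time remaining (in ms) when the
 * sleep is cut short by k_wakeup(), and 0 when it ran to completion.
 *
 *	int32_t left = k_sleep(K_MSEC(100));
 *
 *	if (left > 0) {
 *		...woken early with 'left' ms still pending...
 *	}
 */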
1176 
1177 #ifdef CONFIG_USERSPACE
1178 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1179 {
1180 	return z_impl_k_sleep(timeout);
1181 }
1182 #include <zephyr/syscalls/k_sleep_mrsh.c>
1183 #endif /* CONFIG_USERSPACE */
1184 
1185 int32_t z_impl_k_usleep(int32_t us)
1186 {
1187 	int32_t ticks;
1188 
1189 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1190 
1191 	ticks = k_us_to_ticks_ceil64(us);
1192 	ticks = z_tick_sleep(Z_TIMEOUT_TICKS(ticks));
1193 
1194 	int32_t ret = k_ticks_to_us_ceil64(ticks);
1195 
1196 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1197 
1198 	return ret;
1199 }
1200 
1201 #ifdef CONFIG_USERSPACE
1202 static inline int32_t z_vrfy_k_usleep(int32_t us)
1203 {
1204 	return z_impl_k_usleep(us);
1205 }
1206 #include <zephyr/syscalls/k_usleep_mrsh.c>
1207 #endif /* CONFIG_USERSPACE */
1208 
1209 void z_impl_k_wakeup(k_tid_t thread)
1210 {
1211 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1212 
1213 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
1214 
1215 	if (z_is_thread_sleeping(thread)) {
1216 		z_abort_thread_timeout(thread);
1217 		z_mark_thread_as_not_sleeping(thread);
1218 		ready_thread(thread);
1219 		z_reschedule(&_sched_spinlock, key);
1220 	} else {
1221 		k_spin_unlock(&_sched_spinlock, key);
1222 	}
1223 }
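
/* Usage sketch (illustrative; "tid" is hypothetical): a common pattern
 * parks a thread with k_sleep(K_FOREVER) and has another thread or ISR
 * nudge it with k_wakeup().
 *
 *	In the sleeping thread:  k_sleep(K_FOREVER);
 *	Elsewhere:               k_wakeup(tid);
 */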
1224 
1225 #ifdef CONFIG_USERSPACE
1226 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1227 {
1228 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1229 	z_impl_k_wakeup(thread);
1230 }
1231 #include <zephyr/syscalls/k_wakeup_mrsh.c>
1232 #endif /* CONFIG_USERSPACE */
1233 
1234 k_tid_t z_impl_k_sched_current_thread_query(void)
1235 {
1236 	return _current;
1237 }
1238 
1239 #ifdef CONFIG_USERSPACE
1240 static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1241 {
1242 	return z_impl_k_sched_current_thread_query();
1243 }
1244 #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
1245 #endif /* CONFIG_USERSPACE */
1246 
1247 static inline void unpend_all(_wait_q_t *wait_q)
1248 {
1249 	struct k_thread *thread;
1250 
1251 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
1252 		unpend_thread_no_timeout(thread);
1253 		z_abort_thread_timeout(thread);
1254 		arch_thread_return_value_set(thread, 0);
1255 		ready_thread(thread);
1256 	}
1257 }
1258 
1259 #ifdef CONFIG_THREAD_ABORT_HOOK
1260 extern void thread_abort_hook(struct k_thread *thread);
1261 #endif /* CONFIG_THREAD_ABORT_HOOK */
1262 
1263 /**
1264  * @brief Dequeues the specified thread
1265  *
1266  * Dequeues the specified thread and moves it into the specified new state.
1267  *
1268  * @param thread Identify the thread to halt
1269  * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1270  */
1271 static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state)
1272 {
1273 	bool dummify = false;
1274 
1275 	/* We hold the lock, and the thread is known not to be running
1276 	 * anywhere.
1277 	 */
1278 	if ((thread->base.thread_state & new_state) == 0U) {
1279 		thread->base.thread_state |= new_state;
1280 		if (z_is_thread_queued(thread)) {
1281 			dequeue_thread(thread);
1282 		}
1283 
1284 		if (new_state == _THREAD_DEAD) {
1285 			if (thread->base.pended_on != NULL) {
1286 				unpend_thread_no_timeout(thread);
1287 			}
1288 			z_abort_thread_timeout(thread);
1289 			unpend_all(&thread->join_queue);
1290 
1291 			/* Edge case: aborting _current from within an
1292 			 * ISR that preempted it requires clearing the
1293 			 * _current pointer so the upcoming context
1294 			 * switch doesn't clobber the now-freed
1295 			 * memory
1296 			 */
1297 			if (thread == _current && arch_is_in_isr()) {
1298 				dummify = true;
1299 			}
1300 		}
1301 #ifdef CONFIG_SMP
1302 		unpend_all(&thread->halt_queue);
1303 #endif /* CONFIG_SMP */
1304 		update_cache(1);
1305 
1306 		if (new_state == _THREAD_SUSPENDED) {
1307 			clear_halting(thread);
1308 			return;
1309 		}
1310 
1311 		arch_coprocessors_disable(thread);
1312 
1313 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1314 
1315 		z_thread_monitor_exit(thread);
1316 #ifdef CONFIG_THREAD_ABORT_HOOK
1317 		thread_abort_hook(thread);
1318 #endif /* CONFIG_THREAD_ABORT_HOOK */
1319 
1320 #ifdef CONFIG_OBJ_CORE_THREAD
1321 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
1322 		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1323 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
1324 		k_obj_core_unlink(K_OBJ_CORE(thread));
1325 #endif /* CONFIG_OBJ_CORE_THREAD */
1326 
1327 #ifdef CONFIG_USERSPACE
1328 		z_mem_domain_exit_thread(thread);
1329 		k_thread_perms_all_clear(thread);
1330 		k_object_uninit(thread->stack_obj);
1331 		k_object_uninit(thread);
1332 #endif /* CONFIG_USERSPACE */
1333 
1334 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1335 		k_thread_abort_cleanup(thread);
1336 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1337 
1338 		/* Do this "set _current to dummy" step last so that
1339 		 * subsystems above can rely on _current being
1340 		 * unchanged.  Disabled for posix as that arch
1341 		 * continues to use the _current pointer in its swap
1342 		 * code.  Note that we must leave a non-null switch
1343 		 * handle for any threads spinning in join() (this can
1344 		 * never be used, as our thread is flagged dead, but
1345 		 * it must not be NULL otherwise join can deadlock).
1346 		 * Use 1 as a clearly invalid but non-NULL value.
1347 		 */
1348 		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
1349 #ifdef CONFIG_USE_SWITCH
1350 			_current->switch_handle = (void *)1;
1351 #endif
1352 			z_dummy_thread_init(&_thread_dummy);
1353 
1354 		}
1355 
1356 		/* Finally update the halting thread state, on which
1357 		 * other CPUs might be spinning (see
1358 		 * thread_halt_spin()).
1359 		 */
1360 		clear_halting(thread);
1361 	}
1362 }
1363 
1364 void z_thread_abort(struct k_thread *thread)
1365 {
1366 	bool essential = z_is_thread_essential(thread);
1367 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1368 
1369 	if (z_is_thread_dead(thread)) {
1370 		k_spin_unlock(&_sched_spinlock, key);
1371 		return;
1372 	}
1373 
1374 	z_thread_halt(thread, key, true);
1375 
1376 	if (essential) {
1377 		__ASSERT(!essential, "aborted essential thread %p", thread);
1378 		k_panic();
1379 	}
1380 }
1381 
1382 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1383 void z_impl_k_thread_abort(k_tid_t thread)
1384 {
1385 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1386 
1387 	z_thread_abort(thread);
1388 
1389 	__ASSERT_NO_MSG(z_is_thread_dead(thread));
1390 
1391 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1392 }
1393 #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
1394 
1395 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1396 {
1397 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1398 	int ret;
1399 
1400 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1401 
1402 	if (z_is_thread_dead(thread)) {
1403 		z_sched_switch_spin(thread);
1404 		ret = 0;
1405 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1406 		ret = -EBUSY;
1407 	} else if ((thread == _current) ||
1408 		   (thread->base.pended_on == &_current->join_queue)) {
1409 		ret = -EDEADLK;
1410 	} else {
1411 		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1412 		add_to_waitq_locked(_current, &thread->join_queue);
1413 		add_thread_timeout(_current, timeout);
1414 
1415 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1416 		ret = z_swap(&_sched_spinlock, key);
1417 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1418 
1419 		return ret;
1420 	}
1421 
1422 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1423 
1424 	k_spin_unlock(&_sched_spinlock, key);
1425 	return ret;
1426 }
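
/* Usage sketch (illustrative; "worker" is hypothetical): wait a bounded
 * time for a thread to exit before reusing its stack and thread object;
 * a timeout typically surfaces as -EAGAIN.
 *
 *	int ret = k_thread_join(&worker, K_SECONDS(1));
 *
 *	if (ret != 0) {
 *		...worker has not terminated yet...
 *	}
 */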
1427 
1428 #ifdef CONFIG_USERSPACE
1429 /* Special case: don't oops if the thread is uninitialized.  This is because
1430  * the initialization bit does double-duty for thread objects; if false, it
1431  * means either the thread object is truly uninitialized, or the thread ran
1432  * and exited for some reason.
1433  *
1434  * Return true in this case indicating we should just do nothing and return
1435  * success to the caller.
1436  */
1437 static bool thread_obj_validate(struct k_thread *thread)
1438 {
1439 	struct k_object *ko = k_object_find(thread);
1440 	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1441 
1442 	switch (ret) {
1443 	case 0:
1444 		return false;
1445 	case -EINVAL:
1446 		return true;
1447 	default:
1448 #ifdef CONFIG_LOG
1449 		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
1450 #endif /* CONFIG_LOG */
1451 		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
1452 	}
1453 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1454 }
1455 
1456 static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1457 				       k_timeout_t timeout)
1458 {
1459 	if (thread_obj_validate(thread)) {
1460 		return 0;
1461 	}
1462 
1463 	return z_impl_k_thread_join(thread, timeout);
1464 }
1465 #include <zephyr/syscalls/k_thread_join_mrsh.c>
1466 
1467 static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1468 {
1469 	if (thread_obj_validate(thread)) {
1470 		return;
1471 	}
1472 
1473 	K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
1474 				    "aborting essential thread %p", thread));
1475 
1476 	z_impl_k_thread_abort((struct k_thread *)thread);
1477 }
1478 #include <zephyr/syscalls/k_thread_abort_mrsh.c>
1479 #endif /* CONFIG_USERSPACE */
1480 
1481 /*
1482  * future scheduler.h API implementations
1483  */
1484 bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1485 {
1486 	struct k_thread *thread;
1487 	bool ret = false;
1488 
1489 	K_SPINLOCK(&_sched_spinlock) {
1490 		thread = _priq_wait_best(&wait_q->waitq);
1491 
1492 		if (thread != NULL) {
1493 			z_thread_return_value_set_with_data(thread,
1494 							    swap_retval,
1495 							    swap_data);
1496 			unpend_thread_no_timeout(thread);
1497 			z_abort_thread_timeout(thread);
1498 			ready_thread(thread);
1499 			ret = true;
1500 		}
1501 	}
1502 
1503 	return ret;
1504 }
1505 
1506 int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1507 		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1508 {
1509 	int ret = z_pend_curr(lock, key, wait_q, timeout);
1510 
1511 	if (data != NULL) {
1512 		*data = _current->base.swap_data;
1513 	}
1514 	return ret;
1515 }
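
/* Illustrative pairing (object and message names are hypothetical):
 * z_sched_wait() on the consumer side and z_sched_wake() on the producer
 * side transfer both a swap return value and a data pointer through the
 * swap machinery.
 *
 *	Consumer:  ret = z_sched_wait(&obj->lock, key, &obj->wait_q,
 *				      timeout, &msg);
 *	Producer:  z_sched_wake(&obj->wait_q, 0, msg);
 */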
1516 
1517 int z_sched_waitq_walk(_wait_q_t  *wait_q,
1518 		       int (*func)(struct k_thread *, void *), void *data)
1519 {
1520 	struct k_thread *thread;
1521 	int  status = 0;
1522 
1523 	K_SPINLOCK(&_sched_spinlock) {
1524 		_WAIT_Q_FOR_EACH(wait_q, thread) {
1525 
1526 			/*
1527 			 * Invoke the callback function on each waiting thread
1528 			 * for as long as there are both waiting threads AND
1529 			 * it returns 0.
1530 			 */
1531 
1532 			status = func(thread, data);
1533 			if (status != 0) {
1534 				break;
1535 			}
1536 		}
1537 	}
1538 
1539 	return status;
1540 }
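
/* Usage sketch (names hypothetical): the walk callback receives each
 * pended thread plus the opaque 'data' pointer and stops the iteration
 * by returning non-zero.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	(void)z_sched_waitq_walk(&obj->wait_q, count_waiter, &n);
 */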
1541 
1542 /* This routine exists for benchmarking purposes. It is not used in
1543  * general production code.
1544  */
1545 void z_unready_thread(struct k_thread *thread)
1546 {
1547 	K_SPINLOCK(&_sched_spinlock) {
1548 		unready_thread(thread);
1549 	}
1550 }
1551