1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <zephyr/kernel/internal/sched_priq.h>
10 #include <wait_q.h>
11 #include <kswap.h>
12 #include <kernel_arch_func.h>
13 #include <zephyr/internal/syscall_handler.h>
14 #include <zephyr/drivers/timer/system_timer.h>
15 #include <stdbool.h>
16 #include <kernel_internal.h>
17 #include <zephyr/logging/log.h>
18 #include <zephyr/sys/atomic.h>
19 #include <zephyr/sys/math_extras.h>
20 #include <zephyr/timing/timing.h>
21 #include <zephyr/sys/util.h>
22 
23 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
24 
25 #if defined(CONFIG_SCHED_DUMB)
26 #define _priq_run_add		z_priq_dumb_add
27 #define _priq_run_remove	z_priq_dumb_remove
28 # if defined(CONFIG_SCHED_CPU_MASK)
29 #  define _priq_run_best	_priq_dumb_mask_best
30 # else
31 #  define _priq_run_best	z_priq_dumb_best
32 # endif
33 #elif defined(CONFIG_SCHED_SCALABLE)
34 #define _priq_run_add		z_priq_rb_add
35 #define _priq_run_remove	z_priq_rb_remove
36 #define _priq_run_best		z_priq_rb_best
37 #elif defined(CONFIG_SCHED_MULTIQ)
38 #define _priq_run_add		z_priq_mq_add
39 #define _priq_run_remove	z_priq_mq_remove
40 #define _priq_run_best		z_priq_mq_best
41 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
42 					struct k_thread *thread);
43 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
44 					   struct k_thread *thread);
45 #endif
46 
47 #if defined(CONFIG_WAITQ_SCALABLE)
48 #define z_priq_wait_add		z_priq_rb_add
49 #define _priq_wait_remove	z_priq_rb_remove
50 #define _priq_wait_best		z_priq_rb_best
51 #elif defined(CONFIG_WAITQ_DUMB)
52 #define z_priq_wait_add		z_priq_dumb_add
53 #define _priq_wait_remove	z_priq_dumb_remove
54 #define _priq_wait_best		z_priq_dumb_best
55 #endif
56 
57 struct k_spinlock sched_spinlock;
58 
59 static void update_cache(int preempt_ok);
60 static void halt_thread(struct k_thread *thread, uint8_t new_state);
61 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
62 
63 
64 static inline int is_preempt(struct k_thread *thread)
65 {
66 	/* explanation in kernel_structs.h */
67 	return thread->base.preempt <= _PREEMPT_THRESHOLD;
68 }
69 
70 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
71 	     "CONFIG_NUM_COOP_PRIORITIES must be at least CONFIG_NUM_METAIRQ_PRIORITIES, "
72 	     "since meta-IRQs are just a special class of cooperative "
73 	     "threads.");
74 
75 static inline int is_metairq(struct k_thread *thread)
76 {
77 #if CONFIG_NUM_METAIRQ_PRIORITIES > 0
78 	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
79 		< CONFIG_NUM_METAIRQ_PRIORITIES;
80 #else
81 	ARG_UNUSED(thread);
82 	return 0;
83 #endif
84 }
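
/* Worked example (illustrative only, values assumed): with
 * CONFIG_NUM_METAIRQ_PRIORITIES=2 and K_HIGHEST_THREAD_PRIO == -16
 * (i.e. 16 cooperative priorities), the check above classifies
 * priorities -16 and -15 as meta-IRQ threads, while -14 and all
 * numerically larger priorities remain ordinary cooperative or
 * preemptible priorities.
 */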
85 
86 #if CONFIG_ASSERT
87 static inline bool is_thread_dummy(struct k_thread *thread)
88 {
89 	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
90 }
91 #endif
92 
93 /*
94  * Return value same as e.g. memcmp
95  * > 0 -> thread 1 priority  > thread 2 priority
96  * = 0 -> thread 1 priority == thread 2 priority
97  * < 0 -> thread 1 priority  < thread 2 priority
98  * Do not rely on the actual value returned aside from the above.
99  * (Again, like memcmp.)
100  */
101 int32_t z_sched_prio_cmp(struct k_thread *thread_1,
102 	struct k_thread *thread_2)
103 {
104 	/* `prio` is <32b, so the below cannot overflow. */
105 	int32_t b1 = thread_1->base.prio;
106 	int32_t b2 = thread_2->base.prio;
107 
108 	if (b1 != b2) {
109 		return b2 - b1;
110 	}
111 
112 #ifdef CONFIG_SCHED_DEADLINE
113 	/* If we assume all deadlines live within the same "half" of
114 	 * the 32 bit modulus space (this is a documented API rule),
115 	 * then the latest deadline in the queue minus the earliest is
116 	 * guaranteed to be (2's complement) non-negative.  We can
117 	 * leverage that to compare the values without having to check
118 	 * the current time.
119 	 */
120 	uint32_t d1 = thread_1->base.prio_deadline;
121 	uint32_t d2 = thread_2->base.prio_deadline;
122 
123 	if (d1 != d2) {
124 		/* Sooner deadline means higher effective priority.
125 		 * Doing the calculation with unsigned types and casting
126 		 * to signed isn't perfect, but at least reduces this
127 		 * from UB on overflow to impdef.
128 		 */
129 		return (int32_t) (d2 - d1);
130 	}
131 #endif
132 	return 0;
133 }
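
/* Worked example (illustrative only): the deadline branch above relies
 * on wrap-safe unsigned subtraction.  With assumed values straddling
 * the 32-bit wrap point,
 *
 *     uint32_t d1 = 0xfffffff0U;   earlier deadline (thread_1)
 *     uint32_t d2 = 0x00000010U;   later deadline, already wrapped
 *
 * (int32_t)(d2 - d1) == 0x20 > 0, so the sooner deadline still compares
 * as the higher effective priority, provided both values stay within
 * the same half of the modulus space as the API documents.
 */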
134 
135 static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
136 					 int preempt_ok)
137 {
138 	/* Preemption is OK if it's being explicitly allowed by
139 	 * software state (e.g. the thread called k_yield())
140 	 */
141 	if (preempt_ok != 0) {
142 		return true;
143 	}
144 
145 	__ASSERT(_current != NULL, "");
146 
147 	/* Or if we're pended/suspended/dummy (duh) */
148 	if (z_is_thread_prevented_from_running(_current)) {
149 		return true;
150 	}
151 
152 	/* Edge case on ARM where a thread can be pended out of an
153 	 * interrupt handler before the "synchronous" swap starts
154 	 * context switching.  Platforms with atomic swap can never
155 	 * hit this.
156 	 */
157 	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
158 	    && z_is_thread_timeout_active(thread)) {
159 		return true;
160 	}
161 
162 	/* Otherwise we have to be running a preemptible thread or
163 	 * switching to a metairq
164 	 */
165 	if (is_preempt(_current) || is_metairq(thread)) {
166 		return true;
167 	}
168 
169 	return false;
170 }
171 
172 #ifdef CONFIG_SCHED_CPU_MASK
173 static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
174 {
175 	/* With masks enabled we need to be prepared to walk the list
176 	 * looking for one we can run
177 	 */
178 	struct k_thread *thread;
179 
180 	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
181 		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
182 			return thread;
183 		}
184 	}
185 	return NULL;
186 }
187 #endif
188 
189 #if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
190 static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
191 					  struct k_thread *thread)
192 {
193 	struct k_thread *t;
194 
195 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
196 
197 	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
198 		if (z_sched_prio_cmp(thread, t) > 0) {
199 			sys_dlist_insert(&t->base.qnode_dlist,
200 					 &thread->base.qnode_dlist);
201 			return;
202 		}
203 	}
204 
205 	sys_dlist_append(pq, &thread->base.qnode_dlist);
206 }
207 #endif
208 
209 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
210 {
211 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
212 	int cpu, m = thread->base.cpu_mask;
213 
214 	/* Edge case: it's legal per the API to "make runnable" a
215 	 * thread with all CPUs masked off (i.e. one that isn't
216 	 * actually runnable!).  Sort of a wart in the API and maybe
217 	 * we should address this in docs/assertions instead to avoid
218 	 * the extra test.
219 	 */
220 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
221 
222 	return &_kernel.cpus[cpu].ready_q.runq;
223 #else
224 	ARG_UNUSED(thread);
225 	return &_kernel.ready_q.runq;
226 #endif
227 }
228 
229 static ALWAYS_INLINE void *curr_cpu_runq(void)
230 {
231 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
232 	return &arch_curr_cpu()->ready_q.runq;
233 #else
234 	return &_kernel.ready_q.runq;
235 #endif
236 }
237 
238 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
239 {
240 	_priq_run_add(thread_runq(thread), thread);
241 }
242 
243 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
244 {
245 	_priq_run_remove(thread_runq(thread), thread);
246 }
247 
248 static ALWAYS_INLINE struct k_thread *runq_best(void)
249 {
250 	return _priq_run_best(curr_cpu_runq());
251 }
252 
253 /* _current is never in the run queue until context switch on
254  * SMP configurations, see z_requeue_current()
255  */
256 static inline bool should_queue_thread(struct k_thread *th)
257 {
258 	return !IS_ENABLED(CONFIG_SMP) || th != _current;
259 }
260 
261 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
262 {
263 	thread->base.thread_state |= _THREAD_QUEUED;
264 	if (should_queue_thread(thread)) {
265 		runq_add(thread);
266 	}
267 #ifdef CONFIG_SMP
268 	if (thread == _current) {
269 		/* add current to end of queue means "yield" */
270 		_current_cpu->swap_ok = true;
271 	}
272 #endif
273 }
274 
275 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
276 {
277 	thread->base.thread_state &= ~_THREAD_QUEUED;
278 	if (should_queue_thread(thread)) {
279 		runq_remove(thread);
280 	}
281 }
282 
283 static void signal_pending_ipi(void)
284 {
285 	/* Synchronization note: you might think we need to lock these
286 	 * two steps, but an IPI is idempotent.  It's OK if we do it
287 	 * twice.  All we require is that if a CPU sees the flag true,
288 	 * it is guaranteed to send the IPI, and if a core sets
289 	 * pending_ipi, the IPI will be sent the next time through
290 	 * this code.
291 	 */
292 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
293 	if (arch_num_cpus() > 1) {
294 		if (_kernel.pending_ipi) {
295 			_kernel.pending_ipi = false;
296 			arch_sched_ipi();
297 		}
298 	}
299 #endif
300 }
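
/* Sketch of the flag/send protocol (illustrative only):
 *
 *     flag_ipi();            flag while readying work for another CPU
 *     ...
 *     signal_pending_ipi();  next reschedule point sends at most one IPI
 *
 * Because a redundant IPI is harmless, the two steps are deliberately
 * not locked against each other; the only requirement is that a flagged
 * request is eventually followed by a send.
 */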
301 
302 #ifdef CONFIG_SMP
303 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
304  * never live in the run queue until we are inexorably on the context
305  * switch path on SMP, otherwise there is a deadlock condition where a
306  * set of CPUs pick a cycle of threads to run and wait for them all to
307  * context switch forever.
308  */
309 void z_requeue_current(struct k_thread *curr)
310 {
311 	if (z_is_thread_queued(curr)) {
312 		runq_add(curr);
313 	}
314 	signal_pending_ipi();
315 }
316 
317 /* Return true if the thread is aborting, else false */
318 static inline bool is_aborting(struct k_thread *thread)
319 {
320 	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
321 }
322 
323 /* Return true if the thread is aborting or suspending, else false */
324 static inline bool is_halting(struct k_thread *thread)
325 {
326 	return (thread->base.thread_state &
327 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
328 }
329 #endif
330 
331 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
332 static inline void clear_halting(struct k_thread *thread)
333 {
334 	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
335 }
336 
337 static ALWAYS_INLINE struct k_thread *next_up(void)
338 {
339 #ifdef CONFIG_SMP
340 	if (is_halting(_current)) {
341 		halt_thread(_current, is_aborting(_current) ?
342 				      _THREAD_DEAD : _THREAD_SUSPENDED);
343 	}
344 #endif
345 
346 	struct k_thread *thread = runq_best();
347 
348 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
349 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
350 	/* MetaIRQs must always attempt to return back to a
351 	 * cooperative thread they preempted and not whatever happens
352 	 * to be highest priority now. The cooperative thread was
353 	 * promised it wouldn't be preempted (by non-metairq threads)!
354 	 */
355 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
356 
357 	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
358 		if (!z_is_thread_prevented_from_running(mirqp)) {
359 			thread = mirqp;
360 		} else {
361 			_current_cpu->metairq_preempted = NULL;
362 		}
363 	}
364 #endif
365 
366 #ifndef CONFIG_SMP
367 	/* In uniprocessor mode, we can leave the current thread in
368 	 * the queue (actually we have to, otherwise the assembly
369 	 * context switch code for all architectures would be
370 	 * responsible for putting it back in z_swap and ISR return!),
371 	 * which makes this choice simple.
372 	 */
373 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
374 #else
375 	/* Under SMP, the "cache" mechanism for selecting the next
376 	 * thread doesn't work, so we have more work to do to test
377 	 * _current against the best choice from the queue.  Here, the
378 	 * thread selected above represents "the best thread that is
379 	 * not current".
380 	 *
381 	 * Subtle note on "queued": in SMP mode, _current does not
382 	 * live in the queue, so this isn't exactly the same thing as
383 	 * "ready", it means "is _current already added back to the
384 	 * queue such that we don't want to re-add it".
385 	 */
386 	bool queued = z_is_thread_queued(_current);
387 	bool active = !z_is_thread_prevented_from_running(_current);
388 
389 	if (thread == NULL) {
390 		thread = _current_cpu->idle_thread;
391 	}
392 
393 	if (active) {
394 		int32_t cmp = z_sched_prio_cmp(_current, thread);
395 
396 		/* Ties only switch if state says we yielded */
397 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
398 			thread = _current;
399 		}
400 
401 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
402 			thread = _current;
403 		}
404 	}
405 
406 	/* Put _current back into the queue */
407 	if (thread != _current && active &&
408 		!z_is_idle_thread_object(_current) && !queued) {
409 		queue_thread(_current);
410 	}
411 
412 	/* Take the new _current out of the queue */
413 	if (z_is_thread_queued(thread)) {
414 		dequeue_thread(thread);
415 	}
416 
417 	_current_cpu->swap_ok = false;
418 	return thread;
419 #endif
420 }
421 
422 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
423 {
424 	if (z_is_thread_queued(thread)) {
425 		dequeue_thread(thread);
426 	}
427 	queue_thread(thread);
428 	update_cache(thread == _current);
429 }
430 
431 static void flag_ipi(void)
432 {
433 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
434 	if (arch_num_cpus() > 1) {
435 		_kernel.pending_ipi = true;
436 	}
437 #endif
438 }
439 
440 #ifdef CONFIG_TIMESLICING
441 
442 static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
443 static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
444 static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
445 static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
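
/* Worked example for the slice_ticks initializer above (values assumed,
 * not from any particular board): with CONFIG_TIMESLICE_SIZE=20 ms and
 * a 32768 Hz tick rate,
 *
 *     DIV_ROUND_UP(20 * 32768, 1000) = DIV_ROUND_UP(655360, 1000) = 656
 *
 * so the slice length is rounded up and never undershoots the
 * configured duration.
 */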
446 
447 #ifdef CONFIG_SWAP_NONATOMIC
448 /* If z_swap() isn't atomic, then it's possible for a timer interrupt
449  * to try to timeslice away _current after it has already pended
450  * itself but before the corresponding context switch.  Treat that as
451  * a noop condition in z_time_slice().
452  */
453 static struct k_thread *pending_current;
454 #endif
455 
456 static inline int slice_time(struct k_thread *thread)
457 {
458 	int ret = slice_ticks;
459 
460 #ifdef CONFIG_TIMESLICE_PER_THREAD
461 	if (thread->base.slice_ticks != 0) {
462 		ret = thread->base.slice_ticks;
463 	}
464 #else
465 	ARG_UNUSED(thread);
466 #endif
467 	return ret;
468 }
469 
470 static inline bool sliceable(struct k_thread *thread)
471 {
472 	bool ret = is_preempt(thread)
473 		&& slice_time(thread) != 0
474 		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
475 		&& !z_is_thread_prevented_from_running(thread)
476 		&& !z_is_idle_thread_object(thread);
477 
478 #ifdef CONFIG_TIMESLICE_PER_THREAD
479 	ret |= thread->base.slice_ticks != 0;
480 #endif
481 
482 	return ret;
483 }
484 
485 static void slice_timeout(struct _timeout *t)
486 {
487 	int cpu = ARRAY_INDEX(slice_timeouts, t);
488 
489 	slice_expired[cpu] = true;
490 
491 	/* We need an IPI if we just handled a timeslice expiration
492 	 * for a different CPU.  Ideally this would be able to target
493 	 * the specific core, but that's not part of the API yet.
494 	 */
495 	if (IS_ENABLED(CONFIG_SMP) && cpu != _current_cpu->id) {
496 		flag_ipi();
497 	}
498 }
499 
500 void z_reset_time_slice(struct k_thread *curr)
501 {
502 	int cpu = _current_cpu->id;
503 
504 	z_abort_timeout(&slice_timeouts[cpu]);
505 	slice_expired[cpu] = false;
506 	if (sliceable(curr)) {
507 		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
508 			      K_TICKS(slice_time(curr) - 1));
509 	}
510 }
511 
512 void k_sched_time_slice_set(int32_t slice, int prio)
513 {
514 	K_SPINLOCK(&sched_spinlock) {
515 		slice_ticks = k_ms_to_ticks_ceil32(slice);
516 		slice_max_prio = prio;
517 		z_reset_time_slice(_current);
518 	}
519 }
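
/* Usage sketch (illustrative only): round-robin scheduling of all
 * preemptible threads whose priority is not higher than 0, in 10 ms
 * slices, could be enabled with
 *
 *     k_sched_time_slice_set(10, 0);
 *
 * and passing a slice of 0 disables time slicing again.
 */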
520 
521 #ifdef CONFIG_TIMESLICE_PER_THREAD
522 void k_thread_time_slice_set(struct k_thread *th, int32_t thread_slice_ticks,
523 			     k_thread_timeslice_fn_t expired, void *data)
524 {
525 	K_SPINLOCK(&sched_spinlock) {
526 		th->base.slice_ticks = thread_slice_ticks;
527 		th->base.slice_expired = expired;
528 		th->base.slice_data = data;
529 	}
530 }
531 #endif
532 
533 /* Called out of each timer interrupt */
534 void z_time_slice(void)
535 {
536 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
537 	struct k_thread *curr = _current;
538 
539 #ifdef CONFIG_SWAP_NONATOMIC
540 	if (pending_current == curr) {
541 		z_reset_time_slice(curr);
542 		k_spin_unlock(&sched_spinlock, key);
543 		return;
544 	}
545 	pending_current = NULL;
546 #endif
547 
548 	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
549 #ifdef CONFIG_TIMESLICE_PER_THREAD
550 		if (curr->base.slice_expired) {
551 			k_spin_unlock(&sched_spinlock, key);
552 			curr->base.slice_expired(curr, curr->base.slice_data);
553 			key = k_spin_lock(&sched_spinlock);
554 		}
555 #endif
556 		if (!z_is_thread_prevented_from_running(curr)) {
557 			move_thread_to_end_of_prio_q(curr);
558 		}
559 		z_reset_time_slice(curr);
560 	}
561 	k_spin_unlock(&sched_spinlock, key);
562 }
563 #endif
564 
565 /* Track cooperative threads preempted by metairqs so we can return to
566  * them specifically.  Called at the moment a new thread has been
567  * selected to run.
568  */
569 static void update_metairq_preempt(struct k_thread *thread)
570 {
571 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
572 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
573 	if (is_metairq(thread) && !is_metairq(_current) &&
574 	    !is_preempt(_current)) {
575 		/* Record new preemption */
576 		_current_cpu->metairq_preempted = _current;
577 	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
578 		/* Returning from existing preemption */
579 		_current_cpu->metairq_preempted = NULL;
580 	}
581 #else
582 	ARG_UNUSED(thread);
583 #endif
584 }
585 
586 static void update_cache(int preempt_ok)
587 {
588 #ifndef CONFIG_SMP
589 	struct k_thread *thread = next_up();
590 
591 	if (should_preempt(thread, preempt_ok)) {
592 #ifdef CONFIG_TIMESLICING
593 		if (thread != _current) {
594 			z_reset_time_slice(thread);
595 		}
596 #endif
597 		update_metairq_preempt(thread);
598 		_kernel.ready_q.cache = thread;
599 	} else {
600 		_kernel.ready_q.cache = _current;
601 	}
602 
603 #else
604 	/* The way this works is that the CPU record keeps its
605 	 * "cooperative swapping is OK" flag until the next reschedule
606 	 * call or context switch.  It doesn't need to be tracked per
607 	 * thread because if the thread gets preempted for whatever
608 	 * reason the scheduler will make the same decision anyway.
609 	 */
610 	_current_cpu->swap_ok = preempt_ok;
611 #endif
612 }
613 
614 static bool thread_active_elsewhere(struct k_thread *thread)
615 {
616 	/* True if the thread is currently running on another CPU.
617 	 * There are more scalable designs to answer this question in
618 	 * constant time, but this is fine for now.
619 	 */
620 #ifdef CONFIG_SMP
621 	int currcpu = _current_cpu->id;
622 
623 	unsigned int num_cpus = arch_num_cpus();
624 
625 	for (int i = 0; i < num_cpus; i++) {
626 		if ((i != currcpu) &&
627 		    (_kernel.cpus[i].current == thread)) {
628 			return true;
629 		}
630 	}
631 #endif
632 	ARG_UNUSED(thread);
633 	return false;
634 }
635 
636 static void ready_thread(struct k_thread *thread)
637 {
638 #ifdef CONFIG_KERNEL_COHERENCE
639 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
640 #endif
641 
642 	/* If thread is queued already, do not try to add it to the
643 	 * run queue again
644 	 */
645 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
646 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
647 
648 		queue_thread(thread);
649 		update_cache(0);
650 		flag_ipi();
651 	}
652 }
653 
654 void z_ready_thread(struct k_thread *thread)
655 {
656 	K_SPINLOCK(&sched_spinlock) {
657 		if (!thread_active_elsewhere(thread)) {
658 			ready_thread(thread);
659 		}
660 	}
661 }
662 
663 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
664 {
665 	K_SPINLOCK(&sched_spinlock) {
666 		move_thread_to_end_of_prio_q(thread);
667 	}
668 }
669 
670 void z_sched_start(struct k_thread *thread)
671 {
672 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
673 
674 	if (z_has_thread_started(thread)) {
675 		k_spin_unlock(&sched_spinlock, key);
676 		return;
677 	}
678 
679 	z_mark_thread_as_started(thread);
680 	ready_thread(thread);
681 	z_reschedule(&sched_spinlock, key);
682 }
683 
684 /**
685  * @brief Halt a thread
686  *
687  * If the target thread is running on another CPU, flag it as needing to
688  * abort and send an IPI (if supported) to force a schedule point and wait
689  * until the target thread is switched out (ISRs will spin to wait and threads
690  * will block to wait). If the target thread is not running on another CPU,
691  * then it is safe to act immediately.
692  *
693  * Upon entry to this routine, the scheduler lock is already held. It is
694  * released before this routine returns.
695  *
696  * @param thread Thread to suspend or abort
697  * @param key Current key for sched_spinlock
698  * @param terminate True if aborting thread, false if suspending thread
699  */
700 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
701 			  bool terminate)
702 {
703 #ifdef CONFIG_SMP
704 	if (is_halting(_current) && arch_is_in_isr()) {
705 		/* Another CPU (in an ISR) or thread is waiting for the
706 		 * current thread to halt. Halt it now to help avoid a
707 		 * potential deadlock.
708 		 */
709 		halt_thread(_current,
710 			    is_aborting(_current) ? _THREAD_DEAD
711 						  : _THREAD_SUSPENDED);
712 	}
713 
714 	bool active = thread_active_elsewhere(thread);
715 
716 	if (active) {
717 		/* It's running somewhere else, flag and poke */
718 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
719 							: _THREAD_SUSPENDING);
720 
721 		/* We might spin to wait, so a true synchronous IPI is needed
722 		 * here, not deferred!
723 		 */
724 #ifdef CONFIG_SCHED_IPI_SUPPORTED
725 		arch_sched_ipi();
726 #endif
727 	}
728 
729 	if (is_halting(thread) && (thread != _current)) {
730 		if (arch_is_in_isr()) {
731 			/* ISRs can only spin waiting for another CPU */
732 			k_spin_unlock(&sched_spinlock, key);
733 			while (is_halting(thread)) {
734 			}
735 
736 			/* Now we know it's halting, but not necessarily
737 			 * halted (suspended or aborted). Wait for the switch
738 			 * to happen!
739 			 */
740 			key = k_spin_lock(&sched_spinlock);
741 			z_sched_switch_spin(thread);
742 			k_spin_unlock(&sched_spinlock, key);
743 		} else if (active) {
744 			/* Threads can wait on a queue */
745 			add_to_waitq_locked(_current, terminate ?
746 						      &thread->join_queue :
747 						      &thread->halt_queue);
748 			z_swap(&sched_spinlock, key);
749 		}
750 		return; /* lock has been released */
751 	}
752 #endif
753 	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
754 	if ((thread == _current) && !arch_is_in_isr()) {
755 		z_swap(&sched_spinlock, key);
756 		__ASSERT(!terminate, "aborted _current back from dead");
757 	} else {
758 		k_spin_unlock(&sched_spinlock, key);
759 	}
760 }
761 
762 void z_impl_k_thread_suspend(struct k_thread *thread)
763 {
764 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
765 
766 	(void)z_abort_thread_timeout(thread);
767 
768 	k_spinlock_key_t  key = k_spin_lock(&sched_spinlock);
769 
770 	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {
771 
772 		/* The target thread is already suspended. Nothing to do. */
773 
774 		k_spin_unlock(&sched_spinlock, key);
775 		return;
776 	}
777 
778 	z_thread_halt(thread, key, false);
779 
780 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
781 }
782 
783 #ifdef CONFIG_USERSPACE
784 static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
785 {
786 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
787 	z_impl_k_thread_suspend(thread);
788 }
789 #include <syscalls/k_thread_suspend_mrsh.c>
790 #endif
791 
792 void z_impl_k_thread_resume(struct k_thread *thread)
793 {
794 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
795 
796 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
797 
798 	/* Do not try to resume a thread that was not suspended */
799 	if (!z_is_thread_suspended(thread)) {
800 		k_spin_unlock(&sched_spinlock, key);
801 		return;
802 	}
803 
804 	z_mark_thread_as_not_suspended(thread);
805 	ready_thread(thread);
806 
807 	z_reschedule(&sched_spinlock, key);
808 
809 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
810 }
811 
812 #ifdef CONFIG_USERSPACE
813 static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
814 {
815 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
816 	z_impl_k_thread_resume(thread);
817 }
818 #include <syscalls/k_thread_resume_mrsh.c>
819 #endif
820 
821 static _wait_q_t *pended_on_thread(struct k_thread *thread)
822 {
823 	__ASSERT_NO_MSG(thread->base.pended_on);
824 
825 	return thread->base.pended_on;
826 }
827 
828 static void unready_thread(struct k_thread *thread)
829 {
830 	if (z_is_thread_queued(thread)) {
831 		dequeue_thread(thread);
832 	}
833 	update_cache(thread == _current);
834 }
835 
836 /* sched_spinlock must be held */
837 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
838 {
839 	unready_thread(thread);
840 	z_mark_thread_as_pending(thread);
841 
842 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
843 
844 	if (wait_q != NULL) {
845 		thread->base.pended_on = wait_q;
846 		z_priq_wait_add(&wait_q->waitq, thread);
847 	}
848 }
849 
850 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
851 {
852 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
853 		z_add_thread_timeout(thread, timeout);
854 	}
855 }
856 
857 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
858 			k_timeout_t timeout)
859 {
860 #ifdef CONFIG_KERNEL_COHERENCE
861 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
862 #endif
863 	add_to_waitq_locked(thread, wait_q);
864 	add_thread_timeout(thread, timeout);
865 }
866 
867 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
868 		   k_timeout_t timeout)
869 {
870 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
871 	K_SPINLOCK(&sched_spinlock) {
872 		pend_locked(thread, wait_q, timeout);
873 	}
874 }
875 
876 static inline void unpend_thread_no_timeout(struct k_thread *thread)
877 {
878 	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
879 	z_mark_thread_as_not_pending(thread);
880 	thread->base.pended_on = NULL;
881 }
882 
883 ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
884 {
885 	K_SPINLOCK(&sched_spinlock) {
886 		if (thread->base.pended_on != NULL) {
887 			unpend_thread_no_timeout(thread);
888 		}
889 	}
890 }
891 
892 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
893 {
894 	K_SPINLOCK(&sched_spinlock) {
895 		bool killed = (thread->base.thread_state &
896 				(_THREAD_DEAD | _THREAD_ABORTING));
897 
898 #ifdef CONFIG_EVENTS
899 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
900 
901 		thread->no_wake_on_timeout = false;
902 
903 		if (do_nothing) {
904 			continue;
905 		}
906 #endif
907 
908 		if (!killed) {
909 			/* The thread is not being killed */
910 			if (thread->base.pended_on != NULL) {
911 				unpend_thread_no_timeout(thread);
912 			}
913 			z_mark_thread_as_started(thread);
914 			if (is_timeout) {
915 				z_mark_thread_as_not_suspended(thread);
916 			}
917 			ready_thread(thread);
918 		}
919 	}
920 
921 }
922 
923 #ifdef CONFIG_SYS_CLOCK_EXISTS
924 /* Timeout handler for *_thread_timeout() APIs */
925 void z_thread_timeout(struct _timeout *timeout)
926 {
927 	struct k_thread *thread = CONTAINER_OF(timeout,
928 					       struct k_thread, base.timeout);
929 
930 	z_sched_wake_thread(thread, true);
931 }
932 #endif
933 
934 int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
935 {
936 	/* This is a legacy API for pre-switch architectures and isn't
937 	 * correctly synchronized for multi-cpu use
938 	 */
939 	__ASSERT_NO_MSG(!IS_ENABLED(CONFIG_SMP));
940 
941 	pend_locked(_current, wait_q, timeout);
942 
943 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
944 	pending_current = _current;
945 
946 	int ret = z_swap_irqlock(key);
947 	K_SPINLOCK(&sched_spinlock) {
948 		if (pending_current == _current) {
949 			pending_current = NULL;
950 		}
951 	}
952 	return ret;
953 #else
954 	return z_swap_irqlock(key);
955 #endif
956 }
957 
958 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
959 	       _wait_q_t *wait_q, k_timeout_t timeout)
960 {
961 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
962 	pending_current = _current;
963 #endif
964 	__ASSERT_NO_MSG(sizeof(sched_spinlock) == 0 || lock != &sched_spinlock);
965 
966 	/* We do a "lock swap" prior to calling z_swap(), such that
967 	 * the caller's lock gets released as desired.  But we ensure
968 	 * that we hold the scheduler lock and leave local interrupts
969 	 * masked until we reach the context switch.  z_swap() itself
970 	 * has similar code; the duplication is because it's a legacy
971 	 * API that doesn't expect to be called with scheduler lock
972 	 * held.
973 	 */
974 	(void) k_spin_lock(&sched_spinlock);
975 	pend_locked(_current, wait_q, timeout);
976 	k_spin_release(lock);
977 	return z_swap(&sched_spinlock, key);
978 }
979 
980 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
981 {
982 	struct k_thread *thread = NULL;
983 
984 	K_SPINLOCK(&sched_spinlock) {
985 		thread = _priq_wait_best(&wait_q->waitq);
986 
987 		if (thread != NULL) {
988 			unpend_thread_no_timeout(thread);
989 		}
990 	}
991 
992 	return thread;
993 }
994 
995 struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
996 {
997 	struct k_thread *thread = NULL;
998 
999 	K_SPINLOCK(&sched_spinlock) {
1000 		thread = _priq_wait_best(&wait_q->waitq);
1001 
1002 		if (thread != NULL) {
1003 			unpend_thread_no_timeout(thread);
1004 			(void)z_abort_thread_timeout(thread);
1005 		}
1006 	}
1007 
1008 	return thread;
1009 }
1010 
1011 void z_unpend_thread(struct k_thread *thread)
1012 {
1013 	z_unpend_thread_no_timeout(thread);
1014 	(void)z_abort_thread_timeout(thread);
1015 }
1016 
1017 /* Priority set utility that does no rescheduling; it just changes the
1018  * run queue state, returning true if a reschedule is needed later.
1019  */
1020 bool z_set_prio(struct k_thread *thread, int prio)
1021 {
1022 	bool need_sched = 0;
1023 
1024 	K_SPINLOCK(&sched_spinlock) {
1025 		need_sched = z_is_thread_ready(thread);
1026 
1027 		if (need_sched) {
1028 			/* Don't requeue on SMP if it's the running thread */
1029 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
1030 				dequeue_thread(thread);
1031 				thread->base.prio = prio;
1032 				queue_thread(thread);
1033 			} else {
1034 				thread->base.prio = prio;
1035 			}
1036 			update_cache(1);
1037 		} else {
1038 			thread->base.prio = prio;
1039 		}
1040 	}
1041 
1042 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
1043 
1044 	return need_sched;
1045 }
1046 
1047 void z_thread_priority_set(struct k_thread *thread, int prio)
1048 {
1049 	bool need_sched = z_set_prio(thread, prio);
1050 
1051 	flag_ipi();
1052 
1053 	if (need_sched && _current->base.sched_locked == 0U) {
1054 		z_reschedule_unlocked();
1055 	}
1056 }
1057 
1058 static inline bool resched(uint32_t key)
1059 {
1060 #ifdef CONFIG_SMP
1061 	_current_cpu->swap_ok = 0;
1062 #endif
1063 
1064 	return arch_irq_unlocked(key) && !arch_is_in_isr();
1065 }
1066 
1067 /*
1068  * Check if the next ready thread is the same as the current thread
1069  * and save the trip if true.
1070  */
1071 static inline bool need_swap(void)
1072 {
1073 	/* the SMP case will be handled in the C-based z_swap() */
1074 #ifdef CONFIG_SMP
1075 	return true;
1076 #else
1077 	struct k_thread *new_thread;
1078 
1079 	/* Check if the next ready thread is the same as the current thread */
1080 	new_thread = _kernel.ready_q.cache;
1081 	return new_thread != _current;
1082 #endif
1083 }
1084 
1085 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
1086 {
1087 	if (resched(key.key) && need_swap()) {
1088 		z_swap(lock, key);
1089 	} else {
1090 		k_spin_unlock(lock, key);
1091 		signal_pending_ipi();
1092 	}
1093 }
1094 
1095 void z_reschedule_irqlock(uint32_t key)
1096 {
1097 	if (resched(key) && need_swap()) {
1098 		z_swap_irqlock(key);
1099 	} else {
1100 		irq_unlock(key);
1101 		signal_pending_ipi();
1102 	}
1103 }
1104 
1105 void k_sched_lock(void)
1106 {
1107 	K_SPINLOCK(&sched_spinlock) {
1108 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
1109 
1110 		z_sched_lock();
1111 	}
1112 }
1113 
1114 void k_sched_unlock(void)
1115 {
1116 	K_SPINLOCK(&sched_spinlock) {
1117 		__ASSERT(_current->base.sched_locked != 0U, "");
1118 		__ASSERT(!arch_is_in_isr(), "");
1119 
1120 		++_current->base.sched_locked;
1121 		update_cache(0);
1122 	}
1123 
1124 	LOG_DBG("scheduler unlocked (%p:%d)",
1125 		_current, _current->base.sched_locked);
1126 
1127 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
1128 
1129 	z_reschedule_unlocked();
1130 }
1131 
1132 struct k_thread *z_swap_next_thread(void)
1133 {
1134 #ifdef CONFIG_SMP
1135 	struct k_thread *ret = next_up();
1136 
1137 	if (ret == _current) {
1138 		/* When not swapping, have to signal IPIs here.  In
1139 		 * the context switch case it must happen later, after
1140 		 * _current gets requeued.
1141 		 */
1142 		signal_pending_ipi();
1143 	}
1144 	return ret;
1145 #else
1146 	return _kernel.ready_q.cache;
1147 #endif
1148 }
1149 
1150 #ifdef CONFIG_USE_SWITCH
1151 /* Just a wrapper around _current = xxx with tracing */
1152 static inline void set_current(struct k_thread *new_thread)
1153 {
1154 	z_thread_mark_switched_out();
1155 	_current_cpu->current = new_thread;
1156 }
1157 
1158 /**
1159  * @brief Determine next thread to execute upon completion of an interrupt
1160  *
1161  * Thread preemption is performed by context switching after the completion
1162  * of a non-recursed interrupt. This function determines which thread to
1163  * switch to if any. This function accepts as @p interrupted either:
1164  *
1165  * - The handle for the interrupted thread in which case the thread's context
1166  *   must already be fully saved and ready to be picked up by a different CPU.
1167  *
1168  * - NULL if more work is required to fully save the thread's state after
1169  *   it is known that a new thread is to be scheduled. It is up to the caller
1170  *   to store the handle resulting from the thread that is being switched out
1171  *   in that thread's "switch_handle" field after its
1172  *   context has fully been saved, following the same requirements as with
1173  *   the @ref arch_switch() function.
1174  *
1175  * If a new thread needs to be scheduled then its handle is returned.
1176  * Otherwise the same value provided as @p interrupted is returned back.
1177  * Those handles are the same opaque types used by the @ref arch_switch()
1178  * function.
1179  *
1180  * @warning
1181  * The @ref _current value may have changed after this call and not refer
1182  * to the interrupted thread anymore. It might be necessary to make a local
1183  * copy before calling this function.
1184  *
1185  * @param interrupted Handle for the thread that was interrupted or NULL.
1186  * @retval Handle for the next thread to execute, or @p interrupted when
1187  *         no new thread is to be scheduled.
1188  */
1189 void *z_get_next_switch_handle(void *interrupted)
1190 {
1191 	z_check_stack_sentinel();
1192 
1193 #ifdef CONFIG_SMP
1194 	void *ret = NULL;
1195 
1196 	K_SPINLOCK(&sched_spinlock) {
1197 		struct k_thread *old_thread = _current, *new_thread;
1198 
1199 		if (IS_ENABLED(CONFIG_SMP)) {
1200 			old_thread->switch_handle = NULL;
1201 		}
1202 		new_thread = next_up();
1203 
1204 		z_sched_usage_switch(new_thread);
1205 
1206 		if (old_thread != new_thread) {
1207 			update_metairq_preempt(new_thread);
1208 			z_sched_switch_spin(new_thread);
1209 			arch_cohere_stacks(old_thread, interrupted, new_thread);
1210 
1211 			_current_cpu->swap_ok = 0;
1212 			set_current(new_thread);
1213 
1214 #ifdef CONFIG_TIMESLICING
1215 			z_reset_time_slice(new_thread);
1216 #endif
1217 
1218 #ifdef CONFIG_SPIN_VALIDATE
1219 			/* Changed _current!  Update the spinlock
1220 			 * bookkeeping so the validation doesn't get
1221 			 * confused when the "wrong" thread tries to
1222 			 * release the lock.
1223 			 */
1224 			z_spin_lock_set_owner(&sched_spinlock);
1225 #endif
1226 
1227 			/* A queued (runnable) old/current thread
1228 			 * needs to be added back to the run queue
1229 			 * here, and atomically with its switch handle
1230 			 * being set below.  This is safe now, as we
1231 			 * will not return into it.
1232 			 */
1233 			if (z_is_thread_queued(old_thread)) {
1234 				runq_add(old_thread);
1235 			}
1236 		}
1237 		old_thread->switch_handle = interrupted;
1238 		ret = new_thread->switch_handle;
1239 		if (IS_ENABLED(CONFIG_SMP)) {
1240 			/* Active threads MUST have a null here */
1241 			new_thread->switch_handle = NULL;
1242 		}
1243 	}
1244 	signal_pending_ipi();
1245 	return ret;
1246 #else
1247 	z_sched_usage_switch(_kernel.ready_q.cache);
1248 	_current->switch_handle = interrupted;
1249 	set_current(_kernel.ready_q.cache);
1250 	return _current->switch_handle;
1251 #endif
1252 }
1253 #endif
1254 
1255 void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
1256 {
1257 	ARG_UNUSED(pq);
1258 
1259 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1260 
1261 	sys_dlist_remove(&thread->base.qnode_dlist);
1262 }
1263 
1264 struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
1265 {
1266 	struct k_thread *thread = NULL;
1267 	sys_dnode_t *n = sys_dlist_peek_head(pq);
1268 
1269 	if (n != NULL) {
1270 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
1271 	}
1272 	return thread;
1273 }
1274 
1275 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
1276 {
1277 	struct k_thread *thread_a, *thread_b;
1278 	int32_t cmp;
1279 
1280 	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
1281 	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
1282 
1283 	cmp = z_sched_prio_cmp(thread_a, thread_b);
1284 
1285 	if (cmp > 0) {
1286 		return true;
1287 	} else if (cmp < 0) {
1288 		return false;
1289 	} else {
1290 		return thread_a->base.order_key < thread_b->base.order_key
1291 			? 1 : 0;
1292 	}
1293 }
1294 
1295 void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
1296 {
1297 	struct k_thread *t;
1298 
1299 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1300 
1301 	thread->base.order_key = pq->next_order_key++;
1302 
1303 	/* Renumber at wraparound.  This is tiny code, and in practice
1304 	 * will almost never be hit on real systems.  BUT on very
1305 	 * long-running systems where a priq never completely empties
1306 	 * AND that contains very large numbers of threads, it can be
1307 	 * a latency glitch to loop over all the threads like this.
1308 	 */
1309 	if (!pq->next_order_key) {
1310 		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
1311 			t->base.order_key = pq->next_order_key++;
1312 		}
1313 	}
1314 
1315 	rb_insert(&pq->tree, &thread->base.qnode_rb);
1316 }
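
/* Sketch of the order_key scheme (illustrative only): two equal-priority
 * threads added in order A then B receive keys N and N+1, so
 * z_priq_rb_lessthan() keeps them FIFO within their priority level.
 * When next_order_key wraps to 0, the loop above rewrites the keys of
 * the queued threads in tree order, e.g. {7, 42, 99} becomes {0, 1, 2},
 * preserving relative ordering while making room for new keys.
 */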
1317 
1318 void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
1319 {
1320 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1321 
1322 	rb_remove(&pq->tree, &thread->base.qnode_rb);
1323 
1324 	if (!pq->tree.root) {
1325 		pq->next_order_key = 0;
1326 	}
1327 }
1328 
1329 struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
1330 {
1331 	struct k_thread *thread = NULL;
1332 	struct rbnode *n = rb_get_min(&pq->tree);
1333 
1334 	if (n != NULL) {
1335 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
1336 	}
1337 	return thread;
1338 }
1339 
1340 #ifdef CONFIG_SCHED_MULTIQ
1341 # if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
1342 # error Too many priorities for multiqueue scheduler (max 32)
1343 # endif
1344 
1345 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
1346 					struct k_thread *thread)
1347 {
1348 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1349 
1350 	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
1351 	pq->bitmask |= BIT(priority_bit);
1352 }
1353 
1354 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
1355 					   struct k_thread *thread)
1356 {
1357 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1358 
1359 	sys_dlist_remove(&thread->base.qnode_dlist);
1360 	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
1361 		pq->bitmask &= ~BIT(priority_bit);
1362 	}
1363 }
1364 #endif
1365 
1366 struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
1367 {
1368 	if (!pq->bitmask) {
1369 		return NULL;
1370 	}
1371 
1372 	struct k_thread *thread = NULL;
1373 	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
1374 	sys_dnode_t *n = sys_dlist_peek_head(l);
1375 
1376 	if (n != NULL) {
1377 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
1378 	}
1379 	return thread;
1380 }
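
/* Worked example (illustrative only, values assumed): if
 * K_HIGHEST_THREAD_PRIO == -16 and runnable threads exist at priorities
 * -15 and 3, z_priq_mq_add() sets bits 1 and 19 in pq->bitmask and
 * z_priq_mq_best() returns the head of queues[__builtin_ctz(bitmask)],
 * i.e. queues[1]: a lower bit index always corresponds to a higher
 * priority.
 */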
1381 
1382 int z_unpend_all(_wait_q_t *wait_q)
1383 {
1384 	int need_sched = 0;
1385 	struct k_thread *thread;
1386 
1387 	while ((thread = z_waitq_head(wait_q)) != NULL) {
1388 		z_unpend_thread(thread);
1389 		z_ready_thread(thread);
1390 		need_sched = 1;
1391 	}
1392 
1393 	return need_sched;
1394 }
1395 
1396 void init_ready_q(struct _ready_q *rq)
1397 {
1398 #if defined(CONFIG_SCHED_SCALABLE)
1399 	rq->runq = (struct _priq_rb) {
1400 		.tree = {
1401 			.lessthan_fn = z_priq_rb_lessthan,
1402 		}
1403 	};
1404 #elif defined(CONFIG_SCHED_MULTIQ)
1405 	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
1406 		sys_dlist_init(&rq->runq.queues[i]);
1407 	}
1408 #else
1409 	sys_dlist_init(&rq->runq);
1410 #endif
1411 }
1412 
1413 void z_sched_init(void)
1414 {
1415 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1416 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
1417 		init_ready_q(&_kernel.cpus[i].ready_q);
1418 	}
1419 #else
1420 	init_ready_q(&_kernel.ready_q);
1421 #endif
1422 }
1423 
1424 int z_impl_k_thread_priority_get(k_tid_t thread)
1425 {
1426 	return thread->base.prio;
1427 }
1428 
1429 #ifdef CONFIG_USERSPACE
1430 static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
1431 {
1432 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1433 	return z_impl_k_thread_priority_get(thread);
1434 }
1435 #include <syscalls/k_thread_priority_get_mrsh.c>
1436 #endif
1437 
1438 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
1439 {
1440 	/*
1441 	 * Use NULL, since we cannot know what the entry point is (we do not
1442 	 * keep track of it) and idle cannot change its priority.
1443 	 */
1444 	Z_ASSERT_VALID_PRIO(prio, NULL);
1445 	__ASSERT(!arch_is_in_isr(), "");
1446 
1447 	struct k_thread *th = (struct k_thread *)thread;
1448 
1449 	z_thread_priority_set(th, prio);
1450 }
1451 
1452 #ifdef CONFIG_USERSPACE
1453 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
1454 {
1455 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1456 	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
1457 				    "invalid thread priority %d", prio));
1458 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
1459 				    "thread priority may only be downgraded (%d < %d)",
1460 				    prio, thread->base.prio));
1461 
1462 	z_impl_k_thread_priority_set(thread, prio);
1463 }
1464 #include <syscalls/k_thread_priority_set_mrsh.c>
1465 #endif
1466 
1467 #ifdef CONFIG_SCHED_DEADLINE
1468 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1469 {
1470 	struct k_thread *thread = tid;
1471 
1472 	K_SPINLOCK(&sched_spinlock) {
1473 		thread->base.prio_deadline = k_cycle_get_32() + deadline;
1474 		if (z_is_thread_queued(thread)) {
1475 			dequeue_thread(thread);
1476 			queue_thread(thread);
1477 		}
1478 	}
1479 }
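
/* Usage sketch (illustrative only): deadlines are relative and measured
 * in hardware cycles, so a thread that wants to finish within roughly
 * 2 ms might re-arm its deadline at the top of each work loop:
 *
 *     k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(2));
 *
 * This only affects ordering among threads that share the same static
 * priority; it never overrides a difference in static priority.
 */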
1480 
1481 #ifdef CONFIG_USERSPACE
1482 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1483 {
1484 	struct k_thread *thread = tid;
1485 
1486 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1487 	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1488 				    "invalid thread deadline %d",
1489 				    (int)deadline));
1490 
1491 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1492 }
1493 #include <syscalls/k_thread_deadline_set_mrsh.c>
1494 #endif
1495 #endif
1496 
1497 bool k_can_yield(void)
1498 {
1499 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1500 		 z_is_idle_thread_object(_current));
1501 }
1502 
1503 void z_impl_k_yield(void)
1504 {
1505 	__ASSERT(!arch_is_in_isr(), "");
1506 
1507 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1508 
1509 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1510 
1511 	if (!IS_ENABLED(CONFIG_SMP) ||
1512 	    z_is_thread_queued(_current)) {
1513 		dequeue_thread(_current);
1514 	}
1515 	queue_thread(_current);
1516 	update_cache(1);
1517 	z_swap(&sched_spinlock, key);
1518 }
1519 
1520 #ifdef CONFIG_USERSPACE
1521 static inline void z_vrfy_k_yield(void)
1522 {
1523 	z_impl_k_yield();
1524 }
1525 #include <syscalls/k_yield_mrsh.c>
1526 #endif
1527 
1528 static int32_t z_tick_sleep(k_ticks_t ticks)
1529 {
1530 	uint32_t expected_wakeup_ticks;
1531 
1532 	__ASSERT(!arch_is_in_isr(), "");
1533 
1534 	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
1535 
1536 #ifdef CONFIG_MULTITHREADING
1537 	/* wait of 0 ms is treated as a 'yield' */
1538 	if (ticks == 0) {
1539 		k_yield();
1540 		return 0;
1541 	}
1542 #endif
1543 
1544 	if (Z_TICK_ABS(ticks) <= 0) {
1545 		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1546 	} else {
1547 		expected_wakeup_ticks = Z_TICK_ABS(ticks);
1548 	}
1549 
1550 #ifdef CONFIG_MULTITHREADING
1551 	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
1552 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1553 
1554 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1555 	pending_current = _current;
1556 #endif
1557 	unready_thread(_current);
1558 	z_add_thread_timeout(_current, timeout);
1559 	z_mark_thread_as_suspended(_current);
1560 
1561 	(void)z_swap(&sched_spinlock, key);
1562 
1563 	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1564 
1565 	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
1566 	if (ticks > 0) {
1567 		return ticks;
1568 	}
1569 #else
1570 	/* busy wait to be time coherent since subsystems may depend on it */
1571 	z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
1572 #endif
1573 
1574 	return 0;
1575 }
1576 
1577 int32_t z_impl_k_sleep(k_timeout_t timeout)
1578 {
1579 	k_ticks_t ticks;
1580 
1581 	__ASSERT(!arch_is_in_isr(), "");
1582 
1583 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1584 
1585 	/* in case of K_FOREVER, we suspend */
1586 	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
1587 #ifdef CONFIG_MULTITHREADING
1588 		k_thread_suspend(_current);
1589 #else
1590 		/* In Single Thread, just wait for an interrupt saving power */
1591 		k_cpu_idle();
1592 #endif
1593 		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1594 
1595 		return (int32_t) K_TICKS_FOREVER;
1596 	}
1597 
1598 	ticks = timeout.ticks;
1599 
1600 	ticks = z_tick_sleep(ticks);
1601 
1602 	int32_t ret = k_ticks_to_ms_ceil64(ticks);
1603 
1604 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1605 
1606 	return ret;
1607 }
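
/* Usage sketch (illustrative only): the public wrapper is k_sleep(), so
 * a caller that might be woken early by k_wakeup() can check how much
 * of the requested time was left:
 *
 *     int32_t left_ms = k_sleep(K_MSEC(500));
 *
 * left_ms is 0 after the full sleep and positive if another thread woke
 * us before the 500 ms elapsed.
 */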
1608 
1609 #ifdef CONFIG_USERSPACE
1610 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1611 {
1612 	return z_impl_k_sleep(timeout);
1613 }
1614 #include <syscalls/k_sleep_mrsh.c>
1615 #endif
1616 
1617 int32_t z_impl_k_usleep(int us)
1618 {
1619 	int32_t ticks;
1620 
1621 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1622 
1623 	ticks = k_us_to_ticks_ceil64(us);
1624 	ticks = z_tick_sleep(ticks);
1625 
1626 	int32_t ret = k_ticks_to_us_ceil64(ticks);
1627 
1628 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1629 
1630 	return ret;
1631 }
1632 
1633 #ifdef CONFIG_USERSPACE
1634 static inline int32_t z_vrfy_k_usleep(int us)
1635 {
1636 	return z_impl_k_usleep(us);
1637 }
1638 #include <syscalls/k_usleep_mrsh.c>
1639 #endif
1640 
1641 void z_impl_k_wakeup(k_tid_t thread)
1642 {
1643 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1644 
1645 	if (z_is_thread_pending(thread)) {
1646 		return;
1647 	}
1648 
1649 	if (z_abort_thread_timeout(thread) < 0) {
1650 		/* Might have just been sleeping forever */
1651 		if (thread->base.thread_state != _THREAD_SUSPENDED) {
1652 			return;
1653 		}
1654 	}
1655 
1656 	z_mark_thread_as_not_suspended(thread);
1657 	z_ready_thread(thread);
1658 
1659 	flag_ipi();
1660 
1661 	if (!arch_is_in_isr()) {
1662 		z_reschedule_unlocked();
1663 	}
1664 }
1665 
1666 #ifdef CONFIG_TRACE_SCHED_IPI
1667 extern void z_trace_sched_ipi(void);
1668 #endif
1669 
1670 #ifdef CONFIG_SMP
1671 void z_sched_ipi(void)
1672 {
1673 	/* NOTE: When adding code to this, make sure this is called
1674 	 * at appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
1675 	 */
1676 #ifdef CONFIG_TRACE_SCHED_IPI
1677 	z_trace_sched_ipi();
1678 #endif
1679 
1680 #ifdef CONFIG_TIMESLICING
1681 	if (sliceable(_current)) {
1682 		z_time_slice();
1683 	}
1684 #endif
1685 }
1686 #endif
1687 
1688 #ifdef CONFIG_USERSPACE
1689 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1690 {
1691 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1692 	z_impl_k_wakeup(thread);
1693 }
1694 #include <syscalls/k_wakeup_mrsh.c>
1695 #endif
1696 
1697 k_tid_t z_impl_k_sched_current_thread_query(void)
1698 {
1699 #ifdef CONFIG_SMP
1700 	/* In SMP, _current is a field read from _current_cpu, which
1701 	 * can race with preemption before it is read.  We must lock
1702 	 * local interrupts when reading it.
1703 	 */
1704 	unsigned int k = arch_irq_lock();
1705 #endif
1706 
1707 	k_tid_t ret = _current_cpu->current;
1708 
1709 #ifdef CONFIG_SMP
1710 	arch_irq_unlock(k);
1711 #endif
1712 	return ret;
1713 }
1714 
1715 #ifdef CONFIG_USERSPACE
1716 static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1717 {
1718 	return z_impl_k_sched_current_thread_query();
1719 }
1720 #include <syscalls/k_sched_current_thread_query_mrsh.c>
1721 #endif
1722 
1723 int z_impl_k_is_preempt_thread(void)
1724 {
1725 	return !arch_is_in_isr() && is_preempt(_current);
1726 }
1727 
1728 #ifdef CONFIG_USERSPACE
1729 static inline int z_vrfy_k_is_preempt_thread(void)
1730 {
1731 	return z_impl_k_is_preempt_thread();
1732 }
1733 #include <syscalls/k_is_preempt_thread_mrsh.c>
1734 #endif
1735 
1736 #ifdef CONFIG_SCHED_CPU_MASK
1737 # ifdef CONFIG_SMP
1738 /* Right now we use a two-byte word for this mask */
1739 BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 16, "Too many CPUs for mask word");
1740 # endif
1741 
1742 
1743 static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
1744 {
1745 	int ret = 0;
1746 
1747 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1748 	__ASSERT(z_is_thread_prevented_from_running(thread),
1749 		 "Running threads cannot change CPU pin");
1750 #endif
1751 
1752 	K_SPINLOCK(&sched_spinlock) {
1753 		if (z_is_thread_prevented_from_running(thread)) {
1754 			thread->base.cpu_mask |= enable_mask;
1755 			thread->base.cpu_mask  &= ~disable_mask;
1756 		} else {
1757 			ret = -EINVAL;
1758 		}
1759 	}
1760 
1761 #if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
1762 		int m = thread->base.cpu_mask;
1763 
1764 		__ASSERT((m == 0) || ((m & (m - 1)) == 0),
1765 			 "Only one CPU allowed in mask when PIN_ONLY");
1766 #endif
1767 
1768 	return ret;
1769 }
1770 
1771 int k_thread_cpu_mask_clear(k_tid_t thread)
1772 {
1773 	return cpu_mask_mod(thread, 0, 0xffffffff);
1774 }
1775 
1776 int k_thread_cpu_mask_enable_all(k_tid_t thread)
1777 {
1778 	return cpu_mask_mod(thread, 0xffffffff, 0);
1779 }
1780 
1781 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
1782 {
1783 	return cpu_mask_mod(thread, BIT(cpu), 0);
1784 }
1785 
k_thread_cpu_mask_disable(k_tid_t thread,int cpu)1786 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
1787 {
1788 	return cpu_mask_mod(thread, 0, BIT(cpu));
1789 }
1790 
k_thread_cpu_pin(k_tid_t thread,int cpu)1791 int k_thread_cpu_pin(k_tid_t thread, int cpu)
1792 {
1793 	int ret;
1794 
1795 	ret = k_thread_cpu_mask_clear(thread);
1796 	if (ret == 0) {
1797 		return k_thread_cpu_mask_enable(thread, cpu);
1798 	}
1799 	return ret;
1800 }
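
/* Illustrative (not part of this file): because the mask can only be changed
 * while the target is not runnable, callers typically create the thread with
 * a K_FOREVER delay, pin it, then start it.  A minimal sketch, assuming a
 * hypothetical worker_fn/worker/worker_stack defined elsewhere:
 *
 *	k_tid_t tid = k_thread_create(&worker, worker_stack,
 *				      K_THREAD_STACK_SIZEOF(worker_stack),
 *				      worker_fn, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_FOREVER);
 *	(void)k_thread_cpu_pin(tid, 1);
 *	k_thread_start(tid);
 */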

#endif /* CONFIG_SCHED_CPU_MASK */

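/* Wake every thread pended on @a wait_q: each one is unpended, has any
 * pending timeout aborted, gets a swap return value of 0, and is made ready.
 */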
static inline void unpend_all(_wait_q_t *wait_q)
{
	struct k_thread *thread;

	while ((thread = z_waitq_head(wait_q)) != NULL) {
		unpend_thread_no_timeout(thread);
		(void)z_abort_thread_timeout(thread);
		arch_thread_return_value_set(thread, 0);
		ready_thread(thread);
	}
}

#ifdef CONFIG_CMSIS_RTOS_V1
extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
#endif

/**
 * @brief Dequeues the specified thread
 *
 * Dequeues the specified thread and moves it into the specified new state.
 *
 * @param thread Thread to halt
 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
 */
static void halt_thread(struct k_thread *thread, uint8_t new_state)
{
	/* We hold the lock, and the thread is known not to be running
	 * anywhere.
	 */
	if ((thread->base.thread_state & new_state) == 0U) {
		thread->base.thread_state |= new_state;
		clear_halting(thread);
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}

		if (new_state == _THREAD_DEAD) {
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			(void)z_abort_thread_timeout(thread);
			unpend_all(&thread->join_queue);
		}
#ifdef CONFIG_SMP
		unpend_all(&thread->halt_queue);
#endif
		update_cache(1);

		if (new_state == _THREAD_SUSPENDED) {
			return;
		}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
		arch_float_disable(thread);
#endif

		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);

		z_thread_monitor_exit(thread);

#ifdef CONFIG_CMSIS_RTOS_V1
		z_thread_cmsis_status_mask_clear(thread);
#endif

#ifdef CONFIG_OBJ_CORE_THREAD
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
#endif
		k_obj_core_unlink(K_OBJ_CORE(thread));
#endif

#ifdef CONFIG_USERSPACE
		z_mem_domain_exit_thread(thread);
		k_thread_perms_all_clear(thread);
		k_object_uninit(thread->stack_obj);
		k_object_uninit(thread);
#endif
	}
}

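/* Kernel-internal entry point for aborting a thread.  Essential threads may
 * not be aborted (this panics instead), and aborting an already-dead thread
 * is a no-op.  Otherwise the thread is halted via z_thread_halt(), which also
 * handles a target currently running on another CPU.
 */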
void z_thread_abort(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if ((thread->base.user_options & K_ESSENTIAL) != 0) {
		k_spin_unlock(&sched_spinlock, key);
		__ASSERT(false, "aborting essential thread %p", thread);
		k_panic();
		return;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, true);
}

#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

	z_thread_abort(thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
#endif

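/* k_thread_join(): the four cases below are, in order: the target is already
 * dead (spin until any in-progress context switch away from it completes,
 * then succeed); the caller asked for K_NO_WAIT on a live thread (-EBUSY);
 * joining yourself or a thread that is joining you would deadlock (-EDEADLK);
 * otherwise pend on the target's join_queue until it exits or the timeout
 * expires.
 */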
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
	int ret = 0;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		z_sched_switch_spin(thread);
		ret = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		ret = -EBUSY;
	} else if ((thread == _current) ||
		   (thread->base.pended_on == &_current->join_queue)) {
		ret = -EDEADLK;
	} else {
		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
		add_to_waitq_locked(_current, &thread->join_queue);
		add_thread_timeout(_current, timeout);

		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
		ret = z_swap(&sched_spinlock, key);
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

		return ret;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

	k_spin_unlock(&sched_spinlock, key);
	return ret;
}
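
/* Illustrative caller-side sketch (not part of this file), assuming a
 * hypothetical "worker" thread object defined elsewhere:
 *
 *	int rc = k_thread_join(&worker, K_MSEC(100));
 *
 *	if (rc == -EAGAIN) {
 *		... worker was still running when the 100 ms timeout expired
 *	} else if (rc == 0) {
 *		... worker has exited; its stack and thread object may be reused
 *	}
 */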

#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized.  This is because
 * the initialization bit does double-duty for thread objects; if false, it
 * means either that the thread object is truly uninitialized, or that the
 * thread ran and exited for some reason.
 *
 * Return true in this case, indicating we should just do nothing and return
 * success to the caller.
 */
static bool thread_obj_validate(struct k_thread *thread)
{
	struct k_object *ko = k_object_find(thread);
	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);

	switch (ret) {
	case 0:
		return false;
	case -EINVAL:
		return true;
	default:
#ifdef CONFIG_LOG
		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
#endif
		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
	}
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static inline int z_vrfy_k_thread_join(struct k_thread *thread,
				       k_timeout_t timeout)
{
	if (thread_obj_validate(thread)) {
		return 0;
	}

	return z_impl_k_thread_join(thread, timeout);
}
#include <syscalls/k_thread_join_mrsh.c>

static inline void z_vrfy_k_thread_abort(k_tid_t thread)
{
	if (thread_obj_validate(thread)) {
		return;
	}

	K_OOPS(K_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
				    "aborting essential thread %p", thread));

	z_impl_k_thread_abort((struct k_thread *)thread);
}
#include <syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */

/*
 * future scheduler.h API implementations
 */
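
/* Wake at most one thread pended on @a wait_q: the best-priority waiter (if
 * any) has its blocking call's return value set to @a swap_retval and its
 * swap data to @a swap_data, its timeout is aborted, and it is made ready.
 * Returns true if a thread was woken, false if the queue was empty.
 */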
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
{
	struct k_thread *thread;
	bool ret = false;

	K_SPINLOCK(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			z_thread_return_value_set_with_data(thread,
							    swap_retval,
							    swap_data);
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
			ready_thread(thread);
			ret = true;
		}
	}

	return ret;
}

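/* Pend the current thread on @a wait_q, releasing @a lock / @a key while it
 * sleeps (via z_pend_curr()).  On wakeup the waker's swap_retval is returned
 * and, when @a data is non-NULL, the waker's swap_data is stored through it.
 */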
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
{
	int ret = z_pend_curr(lock, key, wait_q, timeout);

	if (data != NULL) {
		*data = _current->base.swap_data;
	}
	return ret;
}
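
/* Illustrative pairing (hypothetical kernel object "obj", not part of this
 * file): the consumer side sleeps with z_sched_wait(), the producer side
 * hands it a payload with z_sched_wake(); the woken thread runs at the next
 * reschedule point.
 *
 *	consumer, with obj->lock held via key:
 *		void *msg;
 *		int rc = z_sched_wait(&obj->lock, key, &obj->waitq,
 *				      K_FOREVER, &msg);
 *
 *	producer:
 *		(void)z_sched_wake(&obj->waitq, 0, payload);
 */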
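/* Walk @a wait_q under the scheduler lock, invoking @a func on each pended
 * thread until the callback returns non-zero; that status (or 0 if the walk
 * completed) is returned.  Because sched_spinlock is held, the callback must
 * not block or reschedule.
 */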
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data)
{
	struct k_thread *thread;
	int status = 0;

	K_SPINLOCK(&sched_spinlock) {
		_WAIT_Q_FOR_EACH(wait_q, thread) {

			/*
			 * Invoke the callback function on each waiting thread
			 * for as long as there are both waiting threads AND
			 * it returns 0.
			 */

			status = func(thread, data);
			if (status != 0) {
				break;
			}
		}
	}

	return status;
}