1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <zephyr/kernel/sched_priq.h>
10 #include <zephyr/wait_q.h>
11 #include <kswap.h>
12 #include <kernel_arch_func.h>
13 #include <zephyr/syscall_handler.h>
14 #include <zephyr/drivers/timer/system_timer.h>
15 #include <stdbool.h>
16 #include <kernel_internal.h>
17 #include <zephyr/logging/log.h>
18 #include <zephyr/sys/atomic.h>
19 #include <zephyr/sys/math_extras.h>
20 #include <zephyr/timing/timing.h>
21 #include <zephyr/sys/util.h>
22 
23 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
24 
25 #if defined(CONFIG_SCHED_DUMB)
26 #define _priq_run_add		z_priq_dumb_add
27 #define _priq_run_remove	z_priq_dumb_remove
28 # if defined(CONFIG_SCHED_CPU_MASK)
29 #  define _priq_run_best	_priq_dumb_mask_best
30 # else
31 #  define _priq_run_best	z_priq_dumb_best
32 # endif
33 #elif defined(CONFIG_SCHED_SCALABLE)
34 #define _priq_run_add		z_priq_rb_add
35 #define _priq_run_remove	z_priq_rb_remove
36 #define _priq_run_best		z_priq_rb_best
37 #elif defined(CONFIG_SCHED_MULTIQ)
38 #define _priq_run_add		z_priq_mq_add
39 #define _priq_run_remove	z_priq_mq_remove
40 #define _priq_run_best		z_priq_mq_best
41 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
42 					struct k_thread *thread);
43 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
44 					   struct k_thread *thread);
45 #endif
46 
47 #if defined(CONFIG_WAITQ_SCALABLE)
48 #define z_priq_wait_add		z_priq_rb_add
49 #define _priq_wait_remove	z_priq_rb_remove
50 #define _priq_wait_best		z_priq_rb_best
51 #elif defined(CONFIG_WAITQ_DUMB)
52 #define z_priq_wait_add		z_priq_dumb_add
53 #define _priq_wait_remove	z_priq_dumb_remove
54 #define _priq_wait_best		z_priq_dumb_best
55 #endif
56 
57 struct k_spinlock sched_spinlock;
58 
59 static void update_cache(int preempt_ok);
60 static void end_thread(struct k_thread *thread);
61 
62 
63 static inline int is_preempt(struct k_thread *thread)
64 {
65 	/* explanation in kernel_structs.h */
66 	return thread->base.preempt <= _PREEMPT_THRESHOLD;
67 }
68 
69 static inline int is_metairq(struct k_thread *thread)
70 {
71 #if CONFIG_NUM_METAIRQ_PRIORITIES > 0
72 	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
73 		< CONFIG_NUM_METAIRQ_PRIORITIES;
74 #else
75 	return 0;
76 #endif
77 }
78 
79 #if CONFIG_ASSERT
80 static inline bool is_thread_dummy(struct k_thread *thread)
81 {
82 	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
83 }
84 #endif
85 
86 /*
87  * Return value same as e.g. memcmp
88  * > 0 -> thread 1 priority  > thread 2 priority
89  * = 0 -> thread 1 priority == thread 2 priority
90  * < 0 -> thread 1 priority  < thread 2 priority
91  * Do not rely on the actual value returned aside from the above.
92  * (Again, like memcmp.)
93  */
94 int32_t z_sched_prio_cmp(struct k_thread *thread_1,
95 	struct k_thread *thread_2)
96 {
97 	/* `prio` is <32b, so the below cannot overflow. */
98 	int32_t b1 = thread_1->base.prio;
99 	int32_t b2 = thread_2->base.prio;
100 
101 	if (b1 != b2) {
102 		return b2 - b1;
103 	}
104 
105 #ifdef CONFIG_SCHED_DEADLINE
106 	/* If we assume all deadlines live within the same "half" of
107 	 * the 32 bit modulus space (this is a documented API rule),
108 	 * then the latest deadline in the queue minus the earliest is
109 	 * guaranteed to be (2's complement) non-negative.  We can
110 	 * leverage that to compare the values without having to check
111 	 * the current time.
112 	 */
113 	uint32_t d1 = thread_1->base.prio_deadline;
114 	uint32_t d2 = thread_2->base.prio_deadline;
115 
116 	if (d1 != d2) {
117 		/* Sooner deadline means higher effective priority.
118 		 * Doing the calculation with unsigned types and casting
119 		 * to signed isn't perfect, but at least reduces this
120 		 * from UB on overflow to impdef.
121 		 */
122 		return (int32_t) (d2 - d1);
123 	}
124 #endif
125 	return 0;
126 }
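
/* Worked example (illustrative, hypothetical values, not from this file):
 * the CONFIG_SCHED_DEADLINE comparison above stays correct across 32-bit
 * wraparound as long as both deadlines lie within half the modulus space:
 *
 *     uint32_t d1 = 0xFFFFFFF0U;          // just before wraparound
 *     uint32_t d2 = 0x00000010U;          // just after wraparound
 *     (int32_t)(d2 - d1) == 32            // positive: d1 is the sooner one
 *
 * so thread_1 would compare as higher effective priority here, matching the
 * "sooner deadline wins" rule for equal base priorities.
 */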
127 
128 static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
129 					 int preempt_ok)
130 {
131 	/* Preemption is OK if it's being explicitly allowed by
132 	 * software state (e.g. the thread called k_yield())
133 	 */
134 	if (preempt_ok != 0) {
135 		return true;
136 	}
137 
138 	__ASSERT(_current != NULL, "");
139 
140 	/* Or if we're pended/suspended/dummy (duh) */
141 	if (z_is_thread_prevented_from_running(_current)) {
142 		return true;
143 	}
144 
145 	/* Edge case on ARM where a thread can be pended out of an
146 	 * interrupt handler before the "synchronous" swap starts
147 	 * context switching.  Platforms with atomic swap can never
148 	 * hit this.
149 	 */
150 	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
151 	    && z_is_thread_timeout_active(thread)) {
152 		return true;
153 	}
154 
155 	/* Otherwise we have to be running a preemptible thread or
156 	 * switching to a metairq
157 	 */
158 	if (is_preempt(_current) || is_metairq(thread)) {
159 		return true;
160 	}
161 
162 	return false;
163 }
164 
165 #ifdef CONFIG_SCHED_CPU_MASK
166 static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
167 {
168 	/* With masks enabled we need to be prepared to walk the list
169 	 * looking for one we can run
170 	 */
171 	struct k_thread *thread;
172 
173 	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
174 		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
175 			return thread;
176 		}
177 	}
178 	return NULL;
179 }
180 #endif
181 
182 #if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
183 static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
184 					  struct k_thread *thread)
185 {
186 	struct k_thread *t;
187 
188 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
189 
190 	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
191 		if (z_sched_prio_cmp(thread, t) > 0) {
192 			sys_dlist_insert(&t->base.qnode_dlist,
193 					 &thread->base.qnode_dlist);
194 			return;
195 		}
196 	}
197 
198 	sys_dlist_append(pq, &thread->base.qnode_dlist);
199 }
200 #endif
201 
202 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
203 {
204 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
205 	int cpu, m = thread->base.cpu_mask;
206 
207 	/* Edge case: it's legal per the API to "make runnable" a
208 	 * thread with all CPUs masked off (i.e. one that isn't
209 	 * actually runnable!).  Sort of a wart in the API and maybe
210 	 * we should address this in docs/assertions instead to avoid
211 	 * the extra test.
212 	 */
213 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
214 
215 	return &_kernel.cpus[cpu].ready_q.runq;
216 #else
217 	return &_kernel.ready_q.runq;
218 #endif
219 }
220 
221 static ALWAYS_INLINE void *curr_cpu_runq(void)
222 {
223 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
224 	return &arch_curr_cpu()->ready_q.runq;
225 #else
226 	return &_kernel.ready_q.runq;
227 #endif
228 }
229 
230 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
231 {
232 	_priq_run_add(thread_runq(thread), thread);
233 }
234 
235 static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
236 {
237 	_priq_run_remove(thread_runq(thread), thread);
238 }
239 
240 static ALWAYS_INLINE struct k_thread *runq_best(void)
241 {
242 	return _priq_run_best(curr_cpu_runq());
243 }
244 
245 /* _current is never in the run queue until context switch on
246  * SMP configurations, see z_requeue_current()
247  */
248 static inline bool should_queue_thread(struct k_thread *th)
249 {
250 	return !IS_ENABLED(CONFIG_SMP) || th != _current;
251 }
252 
253 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
254 {
255 	thread->base.thread_state |= _THREAD_QUEUED;
256 	if (should_queue_thread(thread)) {
257 		runq_add(thread);
258 	}
259 #ifdef CONFIG_SMP
260 	if (thread == _current) {
261 		/* add current to end of queue means "yield" */
262 		_current_cpu->swap_ok = true;
263 	}
264 #endif
265 }
266 
267 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
268 {
269 	thread->base.thread_state &= ~_THREAD_QUEUED;
270 	if (should_queue_thread(thread)) {
271 		runq_remove(thread);
272 	}
273 }
274 
275 static void signal_pending_ipi(void)
276 {
277 	/* Synchronization note: you might think we need to lock these
278 	 * two steps, but an IPI is idempotent.  It's OK if we do it
279 	 * twice.  All we require is that if a CPU sees the flag true,
280 	 * it is guaranteed to send the IPI, and if a core sets
281 	 * pending_ipi, the IPI will be sent the next time through
282 	 * this code.
283 	 */
284 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
285 	if (arch_num_cpus() > 1) {
286 		if (_kernel.pending_ipi) {
287 			_kernel.pending_ipi = false;
288 			arch_sched_ipi();
289 		}
290 	}
291 #endif
292 }
293 
294 #ifdef CONFIG_SMP
295 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
296  * never live in the run queue until we are inexorably on the context
297  * switch path on SMP, otherwise there is a deadlock condition where a
298  * set of CPUs pick a cycle of threads to run and wait for them all to
299  * context switch forever.
300  */
301 void z_requeue_current(struct k_thread *curr)
302 {
303 	if (z_is_thread_queued(curr)) {
304 		runq_add(curr);
305 	}
306 	signal_pending_ipi();
307 }
308 
309 static inline bool is_aborting(struct k_thread *thread)
310 {
311 	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
312 }
313 #endif
314 
315 static ALWAYS_INLINE struct k_thread *next_up(void)
316 {
317 	struct k_thread *thread = runq_best();
318 
319 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
320 	/* MetaIRQs must always attempt to return to a
321 	 * cooperative thread they preempted and not whatever happens
322 	 * to be highest priority now. The cooperative thread was
323 	 * promised it wouldn't be preempted (by non-metairq threads)!
324 	 */
325 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
326 
327 	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
328 		if (!z_is_thread_prevented_from_running(mirqp)) {
329 			thread = mirqp;
330 		} else {
331 			_current_cpu->metairq_preempted = NULL;
332 		}
333 	}
334 #endif
335 
336 #ifndef CONFIG_SMP
337 	/* In uniprocessor mode, we can leave the current thread in
338 	 * the queue (actually we have to, otherwise the assembly
339 	 * context switch code for all architectures would be
340 	 * responsible for putting it back in z_swap and ISR return!),
341 	 * which makes this choice simple.
342 	 */
343 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
344 #else
345 	/* Under SMP, the "cache" mechanism for selecting the next
346 	 * thread doesn't work, so we have more work to do to test
347 	 * _current against the best choice from the queue.  Here, the
348 	 * thread selected above represents "the best thread that is
349 	 * not current".
350 	 *
351 	 * Subtle note on "queued": in SMP mode, _current does not
352 	 * live in the queue, so this isn't exactly the same thing as
353 	 * "ready", it means "is _current already added back to the
354 	 * queue such that we don't want to re-add it".
355 	 */
356 	bool queued = z_is_thread_queued(_current);
357 	bool active = !z_is_thread_prevented_from_running(_current);
358 
359 	if (thread == NULL) {
360 		thread = _current_cpu->idle_thread;
361 	}
362 
363 	if (active) {
364 		int32_t cmp = z_sched_prio_cmp(_current, thread);
365 
366 		/* Ties only switch if state says we yielded */
367 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
368 			thread = _current;
369 		}
370 
371 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
372 			thread = _current;
373 		}
374 	}
375 
376 	/* Put _current back into the queue */
377 	if (thread != _current && active &&
378 		!z_is_idle_thread_object(_current) && !queued) {
379 		queue_thread(_current);
380 	}
381 
382 	/* Take the new _current out of the queue */
383 	if (z_is_thread_queued(thread)) {
384 		dequeue_thread(thread);
385 	}
386 
387 	_current_cpu->swap_ok = false;
388 	return thread;
389 #endif
390 }
391 
392 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
393 {
394 	if (z_is_thread_queued(thread)) {
395 		dequeue_thread(thread);
396 	}
397 	queue_thread(thread);
398 	update_cache(thread == _current);
399 }
400 
401 static void flag_ipi(void)
402 {
403 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
404 	if (arch_num_cpus() > 1) {
405 		_kernel.pending_ipi = true;
406 	}
407 #endif
408 }
409 
410 #ifdef CONFIG_TIMESLICING
411 
412 static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
413 static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
414 static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
415 static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
416 
417 #ifdef CONFIG_SWAP_NONATOMIC
418 /* If z_swap() isn't atomic, then it's possible for a timer interrupt
419  * to try to timeslice away _current after it has already pended
420  * itself but before the corresponding context switch.  Treat that as
421  * a noop condition in z_time_slice().
422  */
423 static struct k_thread *pending_current;
424 #endif
425 
426 static inline int slice_time(struct k_thread *thread)
427 {
428 	int ret = slice_ticks;
429 
430 #ifdef CONFIG_TIMESLICE_PER_THREAD
431 	if (thread->base.slice_ticks != 0) {
432 		ret = thread->base.slice_ticks;
433 	}
434 #endif
435 	return ret;
436 }
437 
438 static inline bool sliceable(struct k_thread *thread)
439 {
440 	bool ret = is_preempt(thread)
441 		&& slice_time(thread) != 0
442 		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
443 		&& !z_is_thread_prevented_from_running(thread)
444 		&& !z_is_idle_thread_object(thread);
445 
446 #ifdef CONFIG_TIMESLICE_PER_THREAD
447 	ret |= thread->base.slice_ticks != 0;
448 #endif
449 
450 	return ret;
451 }
452 
453 static void slice_timeout(struct _timeout *t)
454 {
455 	int cpu = ARRAY_INDEX(slice_timeouts, t);
456 
457 	slice_expired[cpu] = true;
458 
459 	/* We need an IPI if we just handled a timeslice expiration
460 	 * for a different CPU.  Ideally this would be able to target
461 	 * the specific core, but that's not part of the API yet.
462 	 */
463 	if (IS_ENABLED(CONFIG_SMP) && cpu != _current_cpu->id) {
464 		flag_ipi();
465 	}
466 }
467 
468 void z_reset_time_slice(struct k_thread *curr)
469 {
470 	int cpu = _current_cpu->id;
471 
472 	z_abort_timeout(&slice_timeouts[cpu]);
473 	slice_expired[cpu] = false;
474 	if (sliceable(curr)) {
475 		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
476 			      K_TICKS(slice_time(curr) - 1));
477 	}
478 }
479 
480 void k_sched_time_slice_set(int32_t slice, int prio)
481 {
482 	LOCKED(&sched_spinlock) {
483 		slice_ticks = k_ms_to_ticks_ceil32(slice);
484 		slice_max_prio = prio;
485 		z_reset_time_slice(_current);
486 	}
487 }
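
/* Usage sketch (illustrative only, not part of this file): give preemptible
 * threads whose priority is numerically >= 0 a 20 ms round-robin slice, then
 * turn slicing back off with a zero slice.
 *
 *     k_sched_time_slice_set(20, 0);   // 20 ms slices for prio >= 0 threads
 *     ...
 *     k_sched_time_slice_set(0, 0);    // slice_time() == 0 disables slicing
 */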
488 
489 #ifdef CONFIG_TIMESLICE_PER_THREAD
490 void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
491 			     k_thread_timeslice_fn_t expired, void *data)
492 {
493 	LOCKED(&sched_spinlock) {
494 		th->base.slice_ticks = slice_ticks;
495 		th->base.slice_expired = expired;
496 		th->base.slice_data = data;
497 	}
498 }
499 #endif
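
/* Usage sketch (illustrative; my_thread and my_budget_cb are hypothetical):
 * with CONFIG_TIMESLICE_PER_THREAD a single thread can get its own budget and
 * an expiry callback, which z_time_slice() below invokes from the timer
 * interrupt with sched_spinlock released.
 *
 *     static void my_budget_cb(struct k_thread *th, void *data)
 *     {
 *             // e.g. account CPU usage or re-arm the budget here
 *     }
 *
 *     k_thread_time_slice_set(&my_thread, k_ms_to_ticks_ceil32(5),
 *                             my_budget_cb, NULL);
 */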
500 
501 /* Called out of each timer interrupt */
502 void z_time_slice(void)
503 {
504 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
505 	struct k_thread *curr = _current;
506 
507 #ifdef CONFIG_SWAP_NONATOMIC
508 	if (pending_current == curr) {
509 		z_reset_time_slice(curr);
510 		k_spin_unlock(&sched_spinlock, key);
511 		return;
512 	}
513 	pending_current = NULL;
514 #endif
515 
516 	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
517 #ifdef CONFIG_TIMESLICE_PER_THREAD
518 		if (curr->base.slice_expired) {
519 			k_spin_unlock(&sched_spinlock, key);
520 			curr->base.slice_expired(curr, curr->base.slice_data);
521 			key = k_spin_lock(&sched_spinlock);
522 		}
523 #endif
524 		if (!z_is_thread_prevented_from_running(curr)) {
525 			move_thread_to_end_of_prio_q(curr);
526 		}
527 		z_reset_time_slice(curr);
528 	}
529 	k_spin_unlock(&sched_spinlock, key);
530 }
531 #endif
532 
533 /* Track cooperative threads preempted by metairqs so we can return to
534  * them specifically.  Called at the moment a new thread has been
535  * selected to run.
536  */
537 static void update_metairq_preempt(struct k_thread *thread)
538 {
539 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
540 	if (is_metairq(thread) && !is_metairq(_current) &&
541 	    !is_preempt(_current)) {
542 		/* Record new preemption */
543 		_current_cpu->metairq_preempted = _current;
544 	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
545 		/* Returning from existing preemption */
546 		_current_cpu->metairq_preempted = NULL;
547 	}
548 #endif
549 }
550 
551 static void update_cache(int preempt_ok)
552 {
553 #ifndef CONFIG_SMP
554 	struct k_thread *thread = next_up();
555 
556 	if (should_preempt(thread, preempt_ok)) {
557 #ifdef CONFIG_TIMESLICING
558 		if (thread != _current) {
559 			z_reset_time_slice(thread);
560 		}
561 #endif
562 		update_metairq_preempt(thread);
563 		_kernel.ready_q.cache = thread;
564 	} else {
565 		_kernel.ready_q.cache = _current;
566 	}
567 
568 #else
569 	/* The way this works is that the CPU record keeps its
570 	 * "cooperative swapping is OK" flag until the next reschedule
571 	 * call or context switch.  It doesn't need to be tracked per
572 	 * thread because if the thread gets preempted for whatever
573 	 * reason the scheduler will make the same decision anyway.
574 	 */
575 	_current_cpu->swap_ok = preempt_ok;
576 #endif
577 }
578 
579 static bool thread_active_elsewhere(struct k_thread *thread)
580 {
581 	/* True if the thread is currently running on another CPU.
582 	 * There are more scalable designs to answer this question in
583 	 * constant time, but this is fine for now.
584 	 */
585 #ifdef CONFIG_SMP
586 	int currcpu = _current_cpu->id;
587 
588 	unsigned int num_cpus = arch_num_cpus();
589 
590 	for (int i = 0; i < num_cpus; i++) {
591 		if ((i != currcpu) &&
592 		    (_kernel.cpus[i].current == thread)) {
593 			return true;
594 		}
595 	}
596 #endif
597 	return false;
598 }
599 
600 static void ready_thread(struct k_thread *thread)
601 {
602 #ifdef CONFIG_KERNEL_COHERENCE
603 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
604 #endif
605 
606 	/* If the thread is already queued, do not try to add it to the
607 	 * run queue again
608 	 */
609 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
610 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
611 
612 		queue_thread(thread);
613 		update_cache(0);
614 		flag_ipi();
615 	}
616 }
617 
618 void z_ready_thread(struct k_thread *thread)
619 {
620 	LOCKED(&sched_spinlock) {
621 		if (!thread_active_elsewhere(thread)) {
622 			ready_thread(thread);
623 		}
624 	}
625 }
626 
627 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
628 {
629 	LOCKED(&sched_spinlock) {
630 		move_thread_to_end_of_prio_q(thread);
631 	}
632 }
633 
634 void z_sched_start(struct k_thread *thread)
635 {
636 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
637 
638 	if (z_has_thread_started(thread)) {
639 		k_spin_unlock(&sched_spinlock, key);
640 		return;
641 	}
642 
643 	z_mark_thread_as_started(thread);
644 	ready_thread(thread);
645 	z_reschedule(&sched_spinlock, key);
646 }
647 
648 void z_impl_k_thread_suspend(struct k_thread *thread)
649 {
650 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
651 
652 	(void)z_abort_thread_timeout(thread);
653 
654 	LOCKED(&sched_spinlock) {
655 		if (z_is_thread_queued(thread)) {
656 			dequeue_thread(thread);
657 		}
658 		z_mark_thread_as_suspended(thread);
659 		update_cache(thread == _current);
660 	}
661 
662 	if (thread == _current) {
663 		z_reschedule_unlocked();
664 	}
665 
666 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
667 }
668 
669 #ifdef CONFIG_USERSPACE
670 static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
671 {
672 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
673 	z_impl_k_thread_suspend(thread);
674 }
675 #include <syscalls/k_thread_suspend_mrsh.c>
676 #endif
677 
678 void z_impl_k_thread_resume(struct k_thread *thread)
679 {
680 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
681 
682 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
683 
684 	/* Do not try to resume a thread that was not suspended */
685 	if (!z_is_thread_suspended(thread)) {
686 		k_spin_unlock(&sched_spinlock, key);
687 		return;
688 	}
689 
690 	z_mark_thread_as_not_suspended(thread);
691 	ready_thread(thread);
692 
693 	z_reschedule(&sched_spinlock, key);
694 
695 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
696 }
697 
698 #ifdef CONFIG_USERSPACE
699 static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
700 {
701 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
702 	z_impl_k_thread_resume(thread);
703 }
704 #include <syscalls/k_thread_resume_mrsh.c>
705 #endif
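
/* Usage sketch (illustrative; worker_tid is hypothetical): suspend/resume act
 * as an external on/off switch for a thread; note that suspending also aborts
 * any thread timeout, as done in z_impl_k_thread_suspend() above.
 *
 *     k_thread_suspend(worker_tid);    // stops being scheduled
 *     ...
 *     k_thread_resume(worker_tid);     // becomes ready again
 */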
706 
707 static _wait_q_t *pended_on_thread(struct k_thread *thread)
708 {
709 	__ASSERT_NO_MSG(thread->base.pended_on);
710 
711 	return thread->base.pended_on;
712 }
713 
714 static void unready_thread(struct k_thread *thread)
715 {
716 	if (z_is_thread_queued(thread)) {
717 		dequeue_thread(thread);
718 	}
719 	update_cache(thread == _current);
720 }
721 
722 /* sched_spinlock must be held */
723 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
724 {
725 	unready_thread(thread);
726 	z_mark_thread_as_pending(thread);
727 
728 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
729 
730 	if (wait_q != NULL) {
731 		thread->base.pended_on = wait_q;
732 		z_priq_wait_add(&wait_q->waitq, thread);
733 	}
734 }
735 
736 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
737 {
738 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
739 		z_add_thread_timeout(thread, timeout);
740 	}
741 }
742 
743 static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
744 			k_timeout_t timeout)
745 {
746 #ifdef CONFIG_KERNEL_COHERENCE
747 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
748 #endif
749 	add_to_waitq_locked(thread, wait_q);
750 	add_thread_timeout(thread, timeout);
751 }
752 
753 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
754 		   k_timeout_t timeout)
755 {
756 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
757 	LOCKED(&sched_spinlock) {
758 		pend_locked(thread, wait_q, timeout);
759 	}
760 }
761 
762 static inline void unpend_thread_no_timeout(struct k_thread *thread)
763 {
764 	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
765 	z_mark_thread_as_not_pending(thread);
766 	thread->base.pended_on = NULL;
767 }
768 
769 ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
770 {
771 	LOCKED(&sched_spinlock) {
772 		if (thread->base.pended_on != NULL) {
773 			unpend_thread_no_timeout(thread);
774 		}
775 	}
776 }
777 
778 void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
779 {
780 	LOCKED(&sched_spinlock) {
781 		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
782 			       (thread->base.thread_state & _THREAD_ABORTING));
783 
784 #ifdef CONFIG_EVENTS
785 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
786 
787 		thread->no_wake_on_timeout = false;
788 
789 		if (do_nothing) {
790 			continue;
791 		}
792 #endif
793 
794 		if (!killed) {
795 			/* The thread is not being killed */
796 			if (thread->base.pended_on != NULL) {
797 				unpend_thread_no_timeout(thread);
798 			}
799 			z_mark_thread_as_started(thread);
800 			if (is_timeout) {
801 				z_mark_thread_as_not_suspended(thread);
802 			}
803 			ready_thread(thread);
804 		}
805 	}
806 
807 }
808 
809 #ifdef CONFIG_SYS_CLOCK_EXISTS
810 /* Timeout handler for *_thread_timeout() APIs */
811 void z_thread_timeout(struct _timeout *timeout)
812 {
813 	struct k_thread *thread = CONTAINER_OF(timeout,
814 					       struct k_thread, base.timeout);
815 
816 	z_sched_wake_thread(thread, true);
817 }
818 #endif
819 
820 int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
821 {
822 	/* This is a legacy API for pre-switch architectures and isn't
823 	 * correctly synchronized for multi-cpu use
824 	 */
825 	__ASSERT_NO_MSG(!IS_ENABLED(CONFIG_SMP));
826 
827 	pend_locked(_current, wait_q, timeout);
828 
829 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
830 	pending_current = _current;
831 
832 	int ret = z_swap_irqlock(key);
833 	LOCKED(&sched_spinlock) {
834 		if (pending_current == _current) {
835 			pending_current = NULL;
836 		}
837 	}
838 	return ret;
839 #else
840 	return z_swap_irqlock(key);
841 #endif
842 }
843 
844 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
845 	       _wait_q_t *wait_q, k_timeout_t timeout)
846 {
847 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
848 	pending_current = _current;
849 #endif
850 	__ASSERT_NO_MSG(sizeof(sched_spinlock) == 0 || lock != &sched_spinlock);
851 
852 	/* We do a "lock swap" prior to calling z_swap(), such that
853 	 * the caller's lock gets released as desired.  But we ensure
854 	 * that we hold the scheduler lock and leave local interrupts
855 	 * masked until we reach the context switch.  z_swap() itself
856 	 * has similar code; the duplication is because it's a legacy
857 	 * API that doesn't expect to be called with scheduler lock
858 	 * held.
859 	 */
860 	(void) k_spin_lock(&sched_spinlock);
861 	pend_locked(_current, wait_q, timeout);
862 	k_spin_release(lock);
863 	return z_swap(&sched_spinlock, key);
864 }
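
/* Caller pattern sketch (illustrative; obj and obj_unavailable() are
 * hypothetical): blocking kernel objects take their own spinlock, decide they
 * must wait, and hand that lock to z_pend_curr(), which releases it only
 * after sched_spinlock is held so the wakeup cannot be lost.
 *
 *     k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *
 *     if (obj_unavailable(obj)) {
 *             ret = z_pend_curr(&obj->lock, key, &obj->wait_q, timeout);
 *     } else {
 *             k_spin_unlock(&obj->lock, key);
 *     }
 */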
865 
866 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
867 {
868 	struct k_thread *thread = NULL;
869 
870 	LOCKED(&sched_spinlock) {
871 		thread = _priq_wait_best(&wait_q->waitq);
872 
873 		if (thread != NULL) {
874 			unpend_thread_no_timeout(thread);
875 		}
876 	}
877 
878 	return thread;
879 }
880 
881 struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
882 {
883 	struct k_thread *thread = NULL;
884 
885 	LOCKED(&sched_spinlock) {
886 		thread = _priq_wait_best(&wait_q->waitq);
887 
888 		if (thread != NULL) {
889 			unpend_thread_no_timeout(thread);
890 			(void)z_abort_thread_timeout(thread);
891 		}
892 	}
893 
894 	return thread;
895 }
896 
897 void z_unpend_thread(struct k_thread *thread)
898 {
899 	z_unpend_thread_no_timeout(thread);
900 	(void)z_abort_thread_timeout(thread);
901 }
902 
903 /* Priority set utility that does no rescheduling, it just changes the
904  * run queue state, returning true if a reschedule is needed later.
905  */
906 bool z_set_prio(struct k_thread *thread, int prio)
907 {
908 	bool need_sched = 0;
909 
910 	LOCKED(&sched_spinlock) {
911 		need_sched = z_is_thread_ready(thread);
912 
913 		if (need_sched) {
914 			/* Don't requeue on SMP if it's the running thread */
915 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
916 				dequeue_thread(thread);
917 				thread->base.prio = prio;
918 				queue_thread(thread);
919 			} else {
920 				thread->base.prio = prio;
921 			}
922 			update_cache(1);
923 		} else {
924 			thread->base.prio = prio;
925 		}
926 	}
927 
928 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
929 
930 	return need_sched;
931 }
932 
933 void z_thread_priority_set(struct k_thread *thread, int prio)
934 {
935 	bool need_sched = z_set_prio(thread, prio);
936 
937 	flag_ipi();
938 
939 	if (need_sched && _current->base.sched_locked == 0U) {
940 		z_reschedule_unlocked();
941 	}
942 }
943 
944 static inline bool resched(uint32_t key)
945 {
946 #ifdef CONFIG_SMP
947 	_current_cpu->swap_ok = 0;
948 #endif
949 
950 	return arch_irq_unlocked(key) && !arch_is_in_isr();
951 }
952 
953 /*
954  * Check if the next ready thread is the same as the current thread
955  * and save the trip if true.
956  */
957 static inline bool need_swap(void)
958 {
959 	/* the SMP case will be handled in C based z_swap() */
960 #ifdef CONFIG_SMP
961 	return true;
962 #else
963 	struct k_thread *new_thread;
964 
965 	/* Check if the next ready thread is the same as the current thread */
966 	new_thread = _kernel.ready_q.cache;
967 	return new_thread != _current;
968 #endif
969 }
970 
971 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
972 {
973 	if (resched(key.key) && need_swap()) {
974 		z_swap(lock, key);
975 	} else {
976 		k_spin_unlock(lock, key);
977 		signal_pending_ipi();
978 	}
979 }
980 
981 void z_reschedule_irqlock(uint32_t key)
982 {
983 	if (resched(key)) {
984 		z_swap_irqlock(key);
985 	} else {
986 		irq_unlock(key);
987 		signal_pending_ipi();
988 	}
989 }
990 
991 void k_sched_lock(void)
992 {
993 	LOCKED(&sched_spinlock) {
994 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
995 
996 		z_sched_lock();
997 	}
998 }
999 
1000 void k_sched_unlock(void)
1001 {
1002 	LOCKED(&sched_spinlock) {
1003 		__ASSERT(_current->base.sched_locked != 0U, "");
1004 		__ASSERT(!arch_is_in_isr(), "");
1005 
1006 		++_current->base.sched_locked;
1007 		update_cache(0);
1008 	}
1009 
1010 	LOG_DBG("scheduler unlocked (%p:%d)",
1011 		_current, _current->base.sched_locked);
1012 
1013 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
1014 
1015 	z_reschedule_unlocked();
1016 }
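
/* Usage sketch (illustrative; update_shared_state() is hypothetical):
 * k_sched_lock()/k_sched_unlock() make the caller temporarily cooperative,
 * so no other thread runs on this CPU in between (interrupts still do).
 *
 *     k_sched_lock();
 *     update_shared_state();
 *     k_sched_unlock();        // may reschedule immediately, as above
 */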
1017 
1018 struct k_thread *z_swap_next_thread(void)
1019 {
1020 #ifdef CONFIG_SMP
1021 	struct k_thread *ret = next_up();
1022 
1023 	if (ret == _current) {
1024 		/* When not swapping, have to signal IPIs here.  In
1025 		 * the context switch case it must happen later, after
1026 		 * _current gets requeued.
1027 		 */
1028 		signal_pending_ipi();
1029 	}
1030 	return ret;
1031 #else
1032 	return _kernel.ready_q.cache;
1033 #endif
1034 }
1035 
1036 #ifdef CONFIG_USE_SWITCH
1037 /* Just a wrapper around _current = xxx with tracing */
1038 static inline void set_current(struct k_thread *new_thread)
1039 {
1040 	z_thread_mark_switched_out();
1041 	_current_cpu->current = new_thread;
1042 }
1043 
1044 /**
1045  * @brief Determine next thread to execute upon completion of an interrupt
1046  *
1047  * Thread preemption is performed by context switching after the completion
1048  * of a non-recursed interrupt. This function determines which thread to
1049  * switch to if any. This function accepts as @p interrupted either:
1050  *
1051  * - The handle for the interrupted thread in which case the thread's context
1052  *   must already be fully saved and ready to be picked up by a different CPU.
1053  *
1054  * - NULL if more work is required to fully save the thread's state after
1055  *   it is known that a new thread is to be scheduled. It is up to the caller
1056  *   to store the handle resulting from the thread that is being switched out
1057  *   in that thread's "switch_handle" field after its
1058  *   context has fully been saved, following the same requirements as with
1059  *   the @ref arch_switch() function.
1060  *
1061  * If a new thread needs to be scheduled then its handle is returned.
1062  * Otherwise the same value provided as @p interrupted is returned back.
1063  * Those handles are the same opaque types used by the @ref arch_switch()
1064  * function.
1065  *
1066  * @warning
1067  * The @ref _current value may have changed after this call and not refer
1068  * to the interrupted thread anymore. It might be necessary to make a local
1069  * copy before calling this function.
1070  *
1071  * @param interrupted Handle for the thread that was interrupted or NULL.
1072  * @retval Handle for the next thread to execute, or @p interrupted when
1073  *         no new thread is to be scheduled.
1074  */
1075 void *z_get_next_switch_handle(void *interrupted)
1076 {
1077 	z_check_stack_sentinel();
1078 
1079 #ifdef CONFIG_SMP
1080 	void *ret = NULL;
1081 
1082 	LOCKED(&sched_spinlock) {
1083 		struct k_thread *old_thread = _current, *new_thread;
1084 
1085 		if (is_aborting(_current)) {
1086 			end_thread(_current);
1087 		}
1088 
1089 		if (IS_ENABLED(CONFIG_SMP)) {
1090 			old_thread->switch_handle = NULL;
1091 		}
1092 		new_thread = next_up();
1093 
1094 		z_sched_usage_switch(new_thread);
1095 
1096 		if (old_thread != new_thread) {
1097 			update_metairq_preempt(new_thread);
1098 			z_sched_switch_spin(new_thread);
1099 			arch_cohere_stacks(old_thread, interrupted, new_thread);
1100 
1101 			_current_cpu->swap_ok = 0;
1102 			set_current(new_thread);
1103 
1104 #ifdef CONFIG_TIMESLICING
1105 			z_reset_time_slice(new_thread);
1106 #endif
1107 
1108 #ifdef CONFIG_SPIN_VALIDATE
1109 			/* Changed _current!  Update the spinlock
1110 			 * bookkeeping so the validation doesn't get
1111 			 * confused when the "wrong" thread tries to
1112 			 * release the lock.
1113 			 */
1114 			z_spin_lock_set_owner(&sched_spinlock);
1115 #endif
1116 
1117 			/* A queued (runnable) old/current thread
1118 			 * needs to be added back to the run queue
1119 			 * here, and atomically with its switch handle
1120 			 * being set below.  This is safe now, as we
1121 			 * will not return into it.
1122 			 */
1123 			if (z_is_thread_queued(old_thread)) {
1124 				runq_add(old_thread);
1125 			}
1126 		}
1127 		old_thread->switch_handle = interrupted;
1128 		ret = new_thread->switch_handle;
1129 		if (IS_ENABLED(CONFIG_SMP)) {
1130 			/* Active threads MUST have a null here */
1131 			new_thread->switch_handle = NULL;
1132 		}
1133 	}
1134 	signal_pending_ipi();
1135 	return ret;
1136 #else
1137 	z_sched_usage_switch(_kernel.ready_q.cache);
1138 	_current->switch_handle = interrupted;
1139 	set_current(_kernel.ready_q.cache);
1140 	return _current->switch_handle;
1141 #endif
1142 }
1143 #endif
1144 
1145 void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
1146 {
1147 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1148 
1149 	sys_dlist_remove(&thread->base.qnode_dlist);
1150 }
1151 
1152 struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
1153 {
1154 	struct k_thread *thread = NULL;
1155 	sys_dnode_t *n = sys_dlist_peek_head(pq);
1156 
1157 	if (n != NULL) {
1158 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
1159 	}
1160 	return thread;
1161 }
1162 
1163 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
1164 {
1165 	struct k_thread *thread_a, *thread_b;
1166 	int32_t cmp;
1167 
1168 	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
1169 	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
1170 
1171 	cmp = z_sched_prio_cmp(thread_a, thread_b);
1172 
1173 	if (cmp > 0) {
1174 		return true;
1175 	} else if (cmp < 0) {
1176 		return false;
1177 	} else {
1178 		return thread_a->base.order_key < thread_b->base.order_key
1179 			? 1 : 0;
1180 	}
1181 }
1182 
1183 void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
1184 {
1185 	struct k_thread *t;
1186 
1187 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1188 
1189 	thread->base.order_key = pq->next_order_key++;
1190 
1191 	/* Renumber at wraparound.  This is tiny code, and in practice
1192 	 * will almost never be hit on real systems.  BUT on very
1193 	 * long-running systems where a priq never completely empties
1194 	 * AND contains very large numbers of threads, it can be
1195 	 * a latency glitch to loop over all the threads like this.
1196 	 */
1197 	if (!pq->next_order_key) {
1198 		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
1199 			t->base.order_key = pq->next_order_key++;
1200 		}
1201 	}
1202 
1203 	rb_insert(&pq->tree, &thread->base.qnode_rb);
1204 }
1205 
1206 void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
1207 {
1208 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
1209 
1210 	rb_remove(&pq->tree, &thread->base.qnode_rb);
1211 
1212 	if (!pq->tree.root) {
1213 		pq->next_order_key = 0;
1214 	}
1215 }
1216 
1217 struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
1218 {
1219 	struct k_thread *thread = NULL;
1220 	struct rbnode *n = rb_get_min(&pq->tree);
1221 
1222 	if (n != NULL) {
1223 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
1224 	}
1225 	return thread;
1226 }
1227 
1228 #ifdef CONFIG_SCHED_MULTIQ
1229 # if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
1230 # error Too many priorities for multiqueue scheduler (max 32)
1231 # endif
1232 
1233 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
1234 					struct k_thread *thread)
1235 {
1236 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1237 
1238 	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
1239 	pq->bitmask |= BIT(priority_bit);
1240 }
1241 
1242 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
1243 					   struct k_thread *thread)
1244 {
1245 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1246 
1247 	sys_dlist_remove(&thread->base.qnode_dlist);
1248 	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
1249 		pq->bitmask &= ~BIT(priority_bit);
1250 	}
1251 }
1252 #endif
1253 
1254 struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
1255 {
1256 	if (!pq->bitmask) {
1257 		return NULL;
1258 	}
1259 
1260 	struct k_thread *thread = NULL;
1261 	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
1262 	sys_dnode_t *n = sys_dlist_peek_head(l);
1263 
1264 	if (n != NULL) {
1265 		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
1266 	}
1267 	return thread;
1268 }
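
/* Worked example (illustrative): the multiq scheduler keeps one dlist per
 * priority plus a bitmask of non-empty lists, so selection is O(1).  With
 * threads queued at priority offsets 3 and 7 from K_HIGHEST_THREAD_PRIO:
 *
 *     pq->bitmask == (BIT(3) | BIT(7)) == 0x88
 *     __builtin_ctz(0x88)     == 3
 *
 * so the head of pq->queues[3], the numerically lowest (i.e. highest)
 * priority with waiters, is returned.
 */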
1269 
1270 int z_unpend_all(_wait_q_t *wait_q)
1271 {
1272 	int need_sched = 0;
1273 	struct k_thread *thread;
1274 
1275 	while ((thread = z_waitq_head(wait_q)) != NULL) {
1276 		z_unpend_thread(thread);
1277 		z_ready_thread(thread);
1278 		need_sched = 1;
1279 	}
1280 
1281 	return need_sched;
1282 }
1283 
1284 void init_ready_q(struct _ready_q *rq)
1285 {
1286 #if defined(CONFIG_SCHED_SCALABLE)
1287 	rq->runq = (struct _priq_rb) {
1288 		.tree = {
1289 			.lessthan_fn = z_priq_rb_lessthan,
1290 		}
1291 	};
1292 #elif defined(CONFIG_SCHED_MULTIQ)
1293 	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
1294 		sys_dlist_init(&rq->runq.queues[i]);
1295 	}
1296 #else
1297 	sys_dlist_init(&rq->runq);
1298 #endif
1299 }
1300 
1301 void z_sched_init(void)
1302 {
1303 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1304 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
1305 		init_ready_q(&_kernel.cpus[i].ready_q);
1306 	}
1307 #else
1308 	init_ready_q(&_kernel.ready_q);
1309 #endif
1310 }
1311 
1312 int z_impl_k_thread_priority_get(k_tid_t thread)
1313 {
1314 	return thread->base.prio;
1315 }
1316 
1317 #ifdef CONFIG_USERSPACE
1318 static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
1319 {
1320 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1321 	return z_impl_k_thread_priority_get(thread);
1322 }
1323 #include <syscalls/k_thread_priority_get_mrsh.c>
1324 #endif
1325 
1326 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
1327 {
1328 	/*
1329 	 * Use NULL, since we cannot know what the entry point is (we do not
1330 	 * keep track of it) and idle cannot change its priority.
1331 	 */
1332 	Z_ASSERT_VALID_PRIO(prio, NULL);
1333 	__ASSERT(!arch_is_in_isr(), "");
1334 
1335 	struct k_thread *th = (struct k_thread *)thread;
1336 
1337 	z_thread_priority_set(th, prio);
1338 }
1339 
1340 #ifdef CONFIG_USERSPACE
1341 static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
1342 {
1343 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1344 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
1345 				    "invalid thread priority %d", prio));
1346 	Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
1347 				    "thread priority may only be downgraded (%d < %d)",
1348 				    prio, thread->base.prio));
1349 
1350 	z_impl_k_thread_priority_set(thread, prio);
1351 }
1352 #include <syscalls/k_thread_priority_set_mrsh.c>
1353 #endif
1354 
1355 #ifdef CONFIG_SCHED_DEADLINE
1356 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1357 {
1358 	struct k_thread *thread = tid;
1359 
1360 	LOCKED(&sched_spinlock) {
1361 		thread->base.prio_deadline = k_cycle_get_32() + deadline;
1362 		if (z_is_thread_queued(thread)) {
1363 			dequeue_thread(thread);
1364 			queue_thread(thread);
1365 		}
1366 	}
1367 }
1368 
1369 #ifdef CONFIG_USERSPACE
1370 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1371 {
1372 	struct k_thread *thread = tid;
1373 
1374 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1375 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
1376 				    "invalid thread deadline %d",
1377 				    (int)deadline));
1378 
1379 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1380 }
1381 #include <syscalls/k_thread_deadline_set_mrsh.c>
1382 #endif
1383 #endif
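
/* Usage sketch (illustrative; do_periodic_work() is hypothetical): with
 * CONFIG_SCHED_DEADLINE, equal-priority threads are ordered earliest deadline
 * first, with the deadline given in timer cycles relative to "now", so it is
 * typically re-armed at the top of each processing period.
 *
 *     while (true) {
 *             k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 *             do_periodic_work();
 *             k_sleep(K_MSEC(10));
 *     }
 */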
1384 
1385 bool k_can_yield(void)
1386 {
1387 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1388 		 z_is_idle_thread_object(_current));
1389 }
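
/* Usage sketch (illustrative): k_can_yield() lets shared code yield
 * opportunistically without caring whether it runs from an ISR, pre-kernel
 * init, or the idle thread, where yielding is meaningless or illegal.
 *
 *     if (k_can_yield()) {
 *             k_yield();
 *     }
 */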
1390 
1391 void z_impl_k_yield(void)
1392 {
1393 	__ASSERT(!arch_is_in_isr(), "");
1394 
1395 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1396 
1397 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1398 
1399 	if (!IS_ENABLED(CONFIG_SMP) ||
1400 	    z_is_thread_queued(_current)) {
1401 		dequeue_thread(_current);
1402 	}
1403 	queue_thread(_current);
1404 	update_cache(1);
1405 	z_swap(&sched_spinlock, key);
1406 }
1407 
1408 #ifdef CONFIG_USERSPACE
1409 static inline void z_vrfy_k_yield(void)
1410 {
1411 	z_impl_k_yield();
1412 }
1413 #include <syscalls/k_yield_mrsh.c>
1414 #endif
1415 
1416 static int32_t z_tick_sleep(k_ticks_t ticks)
1417 {
1418 #ifdef CONFIG_MULTITHREADING
1419 	uint32_t expected_wakeup_ticks;
1420 
1421 	__ASSERT(!arch_is_in_isr(), "");
1422 
1423 	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
1424 
1425 	/* a wait of 0 ticks is treated as a 'yield' */
1426 	if (ticks == 0) {
1427 		k_yield();
1428 		return 0;
1429 	}
1430 
1431 	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
1432 	if (Z_TICK_ABS(ticks) <= 0) {
1433 		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1434 	} else {
1435 		expected_wakeup_ticks = Z_TICK_ABS(ticks);
1436 	}
1437 
1438 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1439 
1440 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1441 	pending_current = _current;
1442 #endif
1443 	unready_thread(_current);
1444 	z_add_thread_timeout(_current, timeout);
1445 	z_mark_thread_as_suspended(_current);
1446 
1447 	(void)z_swap(&sched_spinlock, key);
1448 
1449 	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1450 
1451 	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
1452 	if (ticks > 0) {
1453 		return ticks;
1454 	}
1455 #endif
1456 
1457 	return 0;
1458 }
1459 
1460 int32_t z_impl_k_sleep(k_timeout_t timeout)
1461 {
1462 	k_ticks_t ticks;
1463 
1464 	__ASSERT(!arch_is_in_isr(), "");
1465 
1466 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1467 
1468 	/* in case of K_FOREVER, we suspend */
1469 	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
1470 		k_thread_suspend(_current);
1471 
1472 		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1473 
1474 		return (int32_t) K_TICKS_FOREVER;
1475 	}
1476 
1477 	ticks = timeout.ticks;
1478 
1479 	ticks = z_tick_sleep(ticks);
1480 
1481 	int32_t ret = k_ticks_to_ms_floor64(ticks);
1482 
1483 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1484 
1485 	return ret;
1486 }
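
/* Usage sketch (illustrative): k_sleep() reports how much of the requested
 * time was left if the thread was woken early, e.g. by k_wakeup() below.
 *
 *     int32_t left_ms = k_sleep(K_MSEC(500));
 *     if (left_ms > 0) {
 *             // woken early; roughly left_ms milliseconds remained
 *     }
 */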
1487 
1488 #ifdef CONFIG_USERSPACE
1489 static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1490 {
1491 	return z_impl_k_sleep(timeout);
1492 }
1493 #include <syscalls/k_sleep_mrsh.c>
1494 #endif
1495 
1496 int32_t z_impl_k_usleep(int us)
1497 {
1498 	int32_t ticks;
1499 
1500 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1501 
1502 	ticks = k_us_to_ticks_ceil64(us);
1503 	ticks = z_tick_sleep(ticks);
1504 
1505 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, k_ticks_to_us_floor64(ticks));
1506 
1507 	return k_ticks_to_us_floor64(ticks);
1508 }
1509 
1510 #ifdef CONFIG_USERSPACE
1511 static inline int32_t z_vrfy_k_usleep(int us)
1512 {
1513 	return z_impl_k_usleep(us);
1514 }
1515 #include <syscalls/k_usleep_mrsh.c>
1516 #endif
1517 
1518 void z_impl_k_wakeup(k_tid_t thread)
1519 {
1520 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1521 
1522 	if (z_is_thread_pending(thread)) {
1523 		return;
1524 	}
1525 
1526 	if (z_abort_thread_timeout(thread) < 0) {
1527 		/* Might have just been sleeping forever */
1528 		if (thread->base.thread_state != _THREAD_SUSPENDED) {
1529 			return;
1530 		}
1531 	}
1532 
1533 	z_mark_thread_as_not_suspended(thread);
1534 	z_ready_thread(thread);
1535 
1536 	flag_ipi();
1537 
1538 	if (!arch_is_in_isr()) {
1539 		z_reschedule_unlocked();
1540 	}
1541 }
1542 
1543 #ifdef CONFIG_TRACE_SCHED_IPI
1544 extern void z_trace_sched_ipi(void);
1545 #endif
1546 
1547 #ifdef CONFIG_SMP
1548 void z_sched_ipi(void)
1549 {
1550 	/* NOTE: When adding code to this, make sure this is called
1551 	 * at the appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
1552 	 */
1553 #ifdef CONFIG_TRACE_SCHED_IPI
1554 	z_trace_sched_ipi();
1555 #endif
1556 
1557 #ifdef CONFIG_TIMESLICING
1558 	if (sliceable(_current)) {
1559 		z_time_slice();
1560 	}
1561 #endif
1562 }
1563 #endif
1564 
1565 #ifdef CONFIG_USERSPACE
1566 static inline void z_vrfy_k_wakeup(k_tid_t thread)
1567 {
1568 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1569 	z_impl_k_wakeup(thread);
1570 }
1571 #include <syscalls/k_wakeup_mrsh.c>
1572 #endif
1573 
1574 k_tid_t z_impl_z_current_get(void)
1575 {
1576 #ifdef CONFIG_SMP
1577 	/* In SMP, _current is a field read from _current_cpu, which
1578 	 * can race with preemption before it is read.  We must lock
1579 	 * local interrupts when reading it.
1580 	 */
1581 	unsigned int k = arch_irq_lock();
1582 #endif
1583 
1584 	k_tid_t ret = _current_cpu->current;
1585 
1586 #ifdef CONFIG_SMP
1587 	arch_irq_unlock(k);
1588 #endif
1589 	return ret;
1590 }
1591 
1592 #ifdef CONFIG_USERSPACE
1593 static inline k_tid_t z_vrfy_z_current_get(void)
1594 {
1595 	return z_impl_z_current_get();
1596 }
1597 #include <syscalls/z_current_get_mrsh.c>
1598 #endif
1599 
1600 int z_impl_k_is_preempt_thread(void)
1601 {
1602 	return !arch_is_in_isr() && is_preempt(_current);
1603 }
1604 
1605 #ifdef CONFIG_USERSPACE
1606 static inline int z_vrfy_k_is_preempt_thread(void)
1607 {
1608 	return z_impl_k_is_preempt_thread();
1609 }
1610 #include <syscalls/k_is_preempt_thread_mrsh.c>
1611 #endif
1612 
1613 #ifdef CONFIG_SCHED_CPU_MASK
1614 # ifdef CONFIG_SMP
1615 /* Right now we use a single byte for this mask */
1616 BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 8, "Too many CPUs for mask word");
1617 # endif
1618 
1619 
1620 static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
1621 {
1622 	int ret = 0;
1623 
1624 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1625 	__ASSERT(z_is_thread_prevented_from_running(thread),
1626 		 "Running threads cannot change CPU pin");
1627 #endif
1628 
1629 	LOCKED(&sched_spinlock) {
1630 		if (z_is_thread_prevented_from_running(thread)) {
1631 			thread->base.cpu_mask |= enable_mask;
1632 			thread->base.cpu_mask  &= ~disable_mask;
1633 		} else {
1634 			ret = -EINVAL;
1635 		}
1636 	}
1637 
1638 #if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
1639 		int m = thread->base.cpu_mask;
1640 
1641 		__ASSERT((m == 0) || ((m & (m - 1)) == 0),
1642 			 "Only one CPU allowed in mask when PIN_ONLY");
1643 #endif
1644 
1645 	return ret;
1646 }
1647 
1648 int k_thread_cpu_mask_clear(k_tid_t thread)
1649 {
1650 	return cpu_mask_mod(thread, 0, 0xffffffff);
1651 }
1652 
1653 int k_thread_cpu_mask_enable_all(k_tid_t thread)
1654 {
1655 	return cpu_mask_mod(thread, 0xffffffff, 0);
1656 }
1657 
1658 int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
1659 {
1660 	return cpu_mask_mod(thread, BIT(cpu), 0);
1661 }
1662 
1663 int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
1664 {
1665 	return cpu_mask_mod(thread, 0, BIT(cpu));
1666 }
1667 
1668 int k_thread_cpu_pin(k_tid_t thread, int cpu)
1669 {
1670 	int ret;
1671 
1672 	ret = k_thread_cpu_mask_clear(thread);
1673 	if (ret == 0) {
1674 		return k_thread_cpu_mask_enable(thread, cpu);
1675 	}
1676 	return ret;
1677 }
1678 
1679 #endif /* CONFIG_SCHED_CPU_MASK */
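
/* Usage sketch (illustrative; worker, worker_stack and worker_fn are
 * hypothetical): the CPU mask may only change while the thread cannot run,
 * so pin it before starting the thread.
 *
 *     k_tid_t tid = k_thread_create(&worker, worker_stack,
 *                                   K_THREAD_STACK_SIZEOF(worker_stack),
 *                                   worker_fn, NULL, NULL, NULL,
 *                                   5, 0, K_FOREVER);     // not started yet
 *     k_thread_cpu_pin(tid, 1);                           // CPU 1 only
 *     k_thread_start(tid);
 */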
1680 
1681 static inline void unpend_all(_wait_q_t *wait_q)
1682 {
1683 	struct k_thread *thread;
1684 
1685 	while ((thread = z_waitq_head(wait_q)) != NULL) {
1686 		unpend_thread_no_timeout(thread);
1687 		(void)z_abort_thread_timeout(thread);
1688 		arch_thread_return_value_set(thread, 0);
1689 		ready_thread(thread);
1690 	}
1691 }
1692 
1693 #ifdef CONFIG_CMSIS_RTOS_V1
1694 extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
1695 #endif
1696 
1697 static void end_thread(struct k_thread *thread)
1698 {
1699 	/* We hold the lock, and the thread is known not to be running
1700 	 * anywhere.
1701 	 */
1702 	if ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
1703 		thread->base.thread_state |= _THREAD_DEAD;
1704 		thread->base.thread_state &= ~_THREAD_ABORTING;
1705 		if (z_is_thread_queued(thread)) {
1706 			dequeue_thread(thread);
1707 		}
1708 		if (thread->base.pended_on != NULL) {
1709 			unpend_thread_no_timeout(thread);
1710 		}
1711 		(void)z_abort_thread_timeout(thread);
1712 		unpend_all(&thread->join_queue);
1713 		update_cache(1);
1714 
1715 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1716 
1717 		z_thread_monitor_exit(thread);
1718 
1719 #ifdef CONFIG_CMSIS_RTOS_V1
1720 		z_thread_cmsis_status_mask_clear(thread);
1721 #endif
1722 
1723 #ifdef CONFIG_USERSPACE
1724 		z_mem_domain_exit_thread(thread);
1725 		z_thread_perms_all_clear(thread);
1726 		z_object_uninit(thread->stack_obj);
1727 		z_object_uninit(thread);
1728 #endif
1729 	}
1730 }
1731 
1732 void z_thread_abort(struct k_thread *thread)
1733 {
1734 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1735 
1736 	if ((thread->base.user_options & K_ESSENTIAL) != 0) {
1737 		k_spin_unlock(&sched_spinlock, key);
1738 		__ASSERT(false, "aborting essential thread %p", thread);
1739 		k_panic();
1740 		return;
1741 	}
1742 
1743 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1744 		k_spin_unlock(&sched_spinlock, key);
1745 		return;
1746 	}
1747 
1748 #ifdef CONFIG_SMP
1749 	if (is_aborting(thread) && thread == _current && arch_is_in_isr()) {
1750 		/* Another CPU is spinning for us, don't deadlock */
1751 		end_thread(thread);
1752 	}
1753 
1754 	bool active = thread_active_elsewhere(thread);
1755 
1756 	if (active) {
1757 		/* It's running somewhere else, flag and poke */
1758 		thread->base.thread_state |= _THREAD_ABORTING;
1759 
1760 		/* We're going to spin, so need a true synchronous IPI
1761 		 * here, not deferred!
1762 		 */
1763 #ifdef CONFIG_SCHED_IPI_SUPPORTED
1764 		arch_sched_ipi();
1765 #endif
1766 	}
1767 
1768 	if (is_aborting(thread) && thread != _current) {
1769 		if (arch_is_in_isr()) {
1770 			/* ISRs can only spin waiting for another CPU */
1771 			k_spin_unlock(&sched_spinlock, key);
1772 			while (is_aborting(thread)) {
1773 			}
1774 
1775 			/* Now we know it's dying, but not necessarily
1776 			 * dead.  Wait for the switch to happen!
1777 			 */
1778 			key = k_spin_lock(&sched_spinlock);
1779 			z_sched_switch_spin(thread);
1780 			k_spin_unlock(&sched_spinlock, key);
1781 		} else if (active) {
1782 			/* Threads can join */
1783 			add_to_waitq_locked(_current, &thread->join_queue);
1784 			z_swap(&sched_spinlock, key);
1785 		}
1786 		return; /* lock has been released */
1787 	}
1788 #endif
1789 	end_thread(thread);
1790 	if (thread == _current && !arch_is_in_isr()) {
1791 		z_swap(&sched_spinlock, key);
1792 		__ASSERT(false, "aborted _current back from dead");
1793 	}
1794 	k_spin_unlock(&sched_spinlock, key);
1795 }
1796 
1797 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1798 void z_impl_k_thread_abort(struct k_thread *thread)
1799 {
1800 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1801 
1802 	z_thread_abort(thread);
1803 
1804 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1805 }
1806 #endif
1807 
1808 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1809 {
1810 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1811 	int ret = 0;
1812 
1813 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1814 
1815 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1816 		z_sched_switch_spin(thread);
1817 		ret = 0;
1818 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1819 		ret = -EBUSY;
1820 	} else if ((thread == _current) ||
1821 		   (thread->base.pended_on == &_current->join_queue)) {
1822 		ret = -EDEADLK;
1823 	} else {
1824 		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1825 		add_to_waitq_locked(_current, &thread->join_queue);
1826 		add_thread_timeout(_current, timeout);
1827 
1828 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1829 		ret = z_swap(&sched_spinlock, key);
1830 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1831 
1832 		return ret;
1833 	}
1834 
1835 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1836 
1837 	k_spin_unlock(&sched_spinlock, key);
1838 	return ret;
1839 }
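
/* Usage sketch (illustrative; worker_tid is hypothetical): wait up to one
 * second for a thread to exit; the return values mirror those computed above
 * (0 on exit, -EBUSY for K_NO_WAIT, -EDEADLK for self/mutual joins, and
 * -EAGAIN from the swap when the timeout expires first).
 *
 *     int rc = k_thread_join(worker_tid, K_SECONDS(1));
 *     if (rc == -EAGAIN) {
 *             // still running after one second
 *     }
 */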
1840 
1841 #ifdef CONFIG_USERSPACE
1842 /* Special case: don't oops if the thread is uninitialized.  This is because
1843  * the initialization bit does double-duty for thread objects; if false, it means
1844  * the thread object is truly uninitialized, or the thread ran and exited for
1845  * some reason.
1846  *
1847  * Return true in this case indicating we should just do nothing and return
1848  * success to the caller.
1849  */
1850 static bool thread_obj_validate(struct k_thread *thread)
1851 {
1852 	struct z_object *ko = z_object_find(thread);
1853 	int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1854 
1855 	switch (ret) {
1856 	case 0:
1857 		return false;
1858 	case -EINVAL:
1859 		return true;
1860 	default:
1861 #ifdef CONFIG_LOG
1862 		z_dump_object_error(ret, thread, ko, K_OBJ_THREAD);
1863 #endif
1864 		Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied"));
1865 	}
1866 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1867 }
1868 
1869 static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1870 				       k_timeout_t timeout)
1871 {
1872 	if (thread_obj_validate(thread)) {
1873 		return 0;
1874 	}
1875 
1876 	return z_impl_k_thread_join(thread, timeout);
1877 }
1878 #include <syscalls/k_thread_join_mrsh.c>
1879 
1880 static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1881 {
1882 	if (thread_obj_validate(thread)) {
1883 		return;
1884 	}
1885 
1886 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
1887 				    "aborting essential thread %p", thread));
1888 
1889 	z_impl_k_thread_abort((struct k_thread *)thread);
1890 }
1891 #include <syscalls/k_thread_abort_mrsh.c>
1892 #endif /* CONFIG_USERSPACE */
1893 
1894 /*
1895  * future scheduler.h API implementations
1896  */
1897 bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1898 {
1899 	struct k_thread *thread;
1900 	bool ret = false;
1901 
1902 	LOCKED(&sched_spinlock) {
1903 		thread = _priq_wait_best(&wait_q->waitq);
1904 
1905 		if (thread != NULL) {
1906 			z_thread_return_value_set_with_data(thread,
1907 							    swap_retval,
1908 							    swap_data);
1909 			unpend_thread_no_timeout(thread);
1910 			(void)z_abort_thread_timeout(thread);
1911 			ready_thread(thread);
1912 			ret = true;
1913 		}
1914 	}
1915 
1916 	return ret;
1917 }
1918 
1919 int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1920 		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1921 {
1922 	int ret = z_pend_curr(lock, key, wait_q, timeout);
1923 
1924 	if (data != NULL) {
1925 		*data = _current->base.swap_data;
1926 	}
1927 	return ret;
1928 }
1929 
1930 int z_sched_waitq_walk(_wait_q_t  *wait_q,
1931 		       int (*func)(struct k_thread *, void *), void *data)
1932 {
1933 	struct k_thread *thread;
1934 	int  status = 0;
1935 
1936 	LOCKED(&sched_spinlock) {
1937 		_WAIT_Q_FOR_EACH(wait_q, thread) {
1938 
1939 			/*
1940 			 * Invoke the callback function on each waiting thread
1941 			 * for as long as there are both waiting threads AND
1942 			 * it returns 0.
1943 			 */
1944 
1945 			status = func(thread, data);
1946 			if (status != 0) {
1947 				break;
1948 			}
1949 		}
1950 	}
1951 
1952 	return status;
1953 }
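
/* Caller sketch (illustrative; find_waiter(), obj and target are
 * hypothetical): walk a wait queue under sched_spinlock, stopping at the
 * first waiter the callback flags by returning non-zero.
 *
 *     static int find_waiter(struct k_thread *th, void *data)
 *     {
 *             return (th == (struct k_thread *)data) ? 1 : 0;
 *     }
 *
 *     bool found = (z_sched_waitq_walk(&obj->wait_q, find_waiter, target) != 0);
 */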
1954