/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kthread.h>

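/*
 * Guards the kernel's singly linked list of active threads
 * (_kernel.threads), which is maintained for thread monitoring.
 */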
struct k_spinlock z_thread_monitor_lock;

/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		/* Thread is at the head of the list: unlink it directly. */
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		/* Walk the list to find the thread's predecessor. */
		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}

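/*
 * Call user_cb on every thread in the system. The thread monitor lock is
 * held for the whole iteration, so user_cb must not block and must not
 * create or abort threads.
 */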
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * The lock is needed to make sure that _kernel.threads is not
	 * modified by user_cb, either directly or indirectly, e.g. by
	 * calling k_thread_create() or k_thread_abort() from the callback.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}

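/*
 * Illustrative usage sketch (not part of this file); it assumes the usual
 * k_thread_user_cb_t signature of void (*)(const struct k_thread *, void *):
 *
 *   static void count_cb(const struct k_thread *thread, void *user_data)
 *   {
 *           int *count = user_data;
 *
 *           ARG_UNUSED(thread);
 *           (*count)++;
 *   }
 *
 *   int num_threads = 0;
 *
 *   k_thread_foreach(count_cb, &num_threads);
 */

/*
 * As k_thread_foreach(), except that the lock is released around each
 * callback invocation, so user_cb may perform operations that block.
 */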
void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		/*
		 * Release the lock around the callback so that user_cb may
		 * block; note that the thread list can change while the
		 * lock is not held.
		 */
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}

#ifdef CONFIG_SMP
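/*
 * As k_thread_foreach(), but only threads whose recorded CPU
 * (thread->base.cpu) matches the given cpu are passed to user_cb.
 */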
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
				    void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	/*
	 * The lock is needed to make sure that _kernel.threads is not
	 * modified by user_cb, either directly or indirectly, e.g. by
	 * calling k_thread_create() or k_thread_abort() from the callback.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		if (thread->base.cpu == cpu) {
			user_cb(thread, user_data);
		}
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}

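/*
 * CPU-filtered variant of k_thread_foreach_unlocked(): the lock is released
 * around each callback invocation for threads that match the given cpu.
 */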
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
					     void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		if (thread->base.cpu == cpu) {
			/*
			 * Release the lock around the callback so that
			 * user_cb may block; note that the thread list can
			 * change while the lock is not held.
			 */
			k_spin_unlock(&z_thread_monitor_lock, key);
			user_cb(thread, user_data);
			key = k_spin_lock(&z_thread_monitor_lock);
		}
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif /* CONFIG_SMP */