/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel semaphore object.
 *
 * The semaphores are of the 'counting' type, i.e. each 'give' operation will
 * increment the internal count by 1, if no thread is pending on it. The 'init'
 * call initializes the count to 'initial_count'. Following multiple 'give'
 * operations, the same number of 'take' operations can be performed without
 * the calling thread having to pend on the semaphore, or the calling task
 * having to poll.
 */
19 
20 #include <zephyr/kernel.h>
21 #include <zephyr/kernel_structs.h>
22 
23 #include <zephyr/toolchain.h>
24 #include <wait_q.h>
25 #include <zephyr/sys/dlist.h>
26 #include <ksched.h>
27 #include <zephyr/init.h>
28 #include <zephyr/internal/syscall_handler.h>
29 #include <zephyr/tracing/tracing.h>
30 #include <zephyr/sys/check.h>
31 
32 /* We use a system-wide lock to synchronize semaphores, which has
33  * unfortunate performance impact vs. using a per-object lock
34  * (semaphores are *very* widely used).  But per-object locks require
35  * significant extra RAM.  A properly spin-aware semaphore
36  * implementation would spin on atomic access to the count variable,
37  * and not a spinlock per se.  Useful optimization for the future...
38  */
39 static struct k_spinlock lock;
40 
41 #ifdef CONFIG_OBJ_CORE_SEM
42 static struct k_obj_type obj_type_sem;
43 #endif
44 
z_impl_k_sem_init(struct k_sem * sem,unsigned int initial_count,unsigned int limit)45 int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
46 		      unsigned int limit)
47 {
48 	/*
49 	 * Limit cannot be zero and count cannot be greater than limit
50 	 */
51 	CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) {
52 		SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);
53 
54 		return -EINVAL;
55 	}
56 
57 	sem->count = initial_count;
58 	sem->limit = limit;
59 
60 	SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);
61 
62 	z_waitq_init(&sem->wait_q);
63 #if defined(CONFIG_POLL)
64 	sys_dlist_init(&sem->poll_events);
65 #endif
66 	k_object_init(sem);
67 
68 #ifdef CONFIG_OBJ_CORE_SEM
69 	k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
70 #endif
71 
72 	return 0;
73 }
74 
#ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the object (and mark it initialized)
 * before handing off to the kernel-mode implementation.
 */
int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));
	return z_impl_k_sem_init(sem, initial_count, limit);
}
#include <syscalls/k_sem_init_mrsh.c>
#endif
84 
handle_poll_events(struct k_sem * sem)85 static inline bool handle_poll_events(struct k_sem *sem)
86 {
87 #ifdef CONFIG_POLL
88 	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
89 	return true;
90 #else
91 	ARG_UNUSED(sem);
92 	return false;
93 #endif
94 }
95 
z_impl_k_sem_give(struct k_sem * sem)96 void z_impl_k_sem_give(struct k_sem *sem)
97 {
98 	k_spinlock_key_t key = k_spin_lock(&lock);
99 	struct k_thread *thread;
100 	bool resched = true;
101 
102 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);
103 
104 	thread = z_unpend_first_thread(&sem->wait_q);
105 
106 	if (thread != NULL) {
107 		arch_thread_return_value_set(thread, 0);
108 		z_ready_thread(thread);
109 	} else {
110 		sem->count += (sem->count != sem->limit) ? 1U : 0U;
111 		resched = handle_poll_events(sem);
112 	}
113 
114 	if (resched) {
115 		z_reschedule(&lock, key);
116 	} else {
117 		k_spin_unlock(&lock, key);
118 	}
119 
120 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
121 }
122 
#ifdef CONFIG_USERSPACE
/* Syscall verifier: check the caller has access to an initialized
 * semaphore object before giving it.
 */
static inline void z_vrfy_k_sem_give(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_give(sem);
}
#include <syscalls/k_sem_give_mrsh.c>
#endif
131 
/**
 * @brief Take a semaphore, pending up to @a timeout if unavailable.
 *
 * @retval 0 on success.
 * @retval -EBUSY if unavailable and @a timeout is K_NO_WAIT.
 * @retval -EAGAIN if the wait timed out or the semaphore was reset.
 */
int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	int ret = 0;

	/* Blocking takes are forbidden in ISR context */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);

	if (likely(sem->count > 0U)) {
		/* Fast path: the semaphore is available right now */
		sem->count--;
		k_spin_unlock(&lock, key);
		ret = 0;
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/* Unavailable and the caller refuses to wait */
		k_spin_unlock(&lock, key);
		ret = -EBUSY;
		goto out;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);

	/* Pend on the wait queue (releases the lock).  The return
	 * value is set by k_sem_give()/k_sem_reset() or the timeout.
	 */
	ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);

	return ret;
}
165 
z_impl_k_sem_reset(struct k_sem * sem)166 void z_impl_k_sem_reset(struct k_sem *sem)
167 {
168 	struct k_thread *thread;
169 	k_spinlock_key_t key = k_spin_lock(&lock);
170 
171 	while (true) {
172 		thread = z_unpend_first_thread(&sem->wait_q);
173 		if (thread == NULL) {
174 			break;
175 		}
176 		arch_thread_return_value_set(thread, -EAGAIN);
177 		z_ready_thread(thread);
178 	}
179 	sem->count = 0;
180 
181 	SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);
182 
183 	handle_poll_events(sem);
184 
185 	z_reschedule(&lock, key);
186 }
187 
#ifdef CONFIG_USERSPACE
/* Syscall verifiers: each validates the caller's access to an
 * initialized semaphore object, then forwards to the kernel-mode
 * implementation.
 */
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_take(sem, timeout);
}
#include <syscalls/k_sem_take_mrsh.c>

static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_reset(sem);
}
#include <syscalls/k_sem_reset_mrsh.c>

static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
	K_OOPS(K_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_count_get(sem);
}
#include <syscalls/k_sem_count_get_mrsh.c>

#endif
211 
#ifdef CONFIG_OBJ_CORE_SEM
/* One-time boot hook: register the semaphore object type with the
 * object core and link in every statically defined semaphore.
 *
 * @return 0 always (SYS_INIT contract).
 */
static int init_sem_obj_core_list(void)
{
	/* Initialize semaphore object type */

	z_obj_type_init(&obj_type_sem, K_OBJ_TYPE_SEM_ID,
			offsetof(struct k_sem, obj_core));

	/* Initialize and link statically defined semaphores */

	STRUCT_SECTION_FOREACH(k_sem, sem) {
		k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
	}

	return 0;
}

SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
232