/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel semaphore object.
 *
 * The semaphores are of the 'counting' type, i.e. each 'give' operation will
 * increment the internal count by 1, if no thread is pending on it. The 'init'
 * call initializes the count to 'initial_count'. Following multiple 'give'
 * operations, the same number of 'take' operations can be performed without
 * the calling thread having to pend on the semaphore, or the calling task
 * having to poll.
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <zephyr/wait_q.h>
#include <zephyr/sys/dlist.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>

/* We use a system-wide lock to synchronize semaphores, which has
 * unfortunate performance impact vs. using a per-object lock
 * (semaphores are *very* widely used).  But per-object locks require
 * significant extra RAM.  A properly spin-aware semaphore
 * implementation would spin on atomic access to the count variable,
 * and not a spinlock per se.  Useful optimization for the future...
 */
static struct k_spinlock lock;

z_impl_k_sem_init(struct k_sem * sem,unsigned int initial_count,unsigned int limit)41 int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
42 		      unsigned int limit)
43 {
44 	/*
45 	 * Limit cannot be zero and count cannot be greater than limit
46 	 */
47 	CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) {
48 		SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL);
49 
50 		return -EINVAL;
51 	}
52 
53 	sem->count = initial_count;
54 	sem->limit = limit;
55 
56 	SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0);
57 
58 	z_waitq_init(&sem->wait_q);
59 #if defined(CONFIG_POLL)
60 	sys_dlist_init(&sem->poll_events);
61 #endif
62 	z_object_init(sem);
63 
64 	return 0;
65 }
66 
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: validate @a sem as an initializable
 * kernel object on behalf of user mode, then defer to the implementation.
 */
int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));
	return z_impl_k_sem_init(sem, initial_count, limit);
}
#include <syscalls/k_sem_init_mrsh.c>
#endif

handle_poll_events(struct k_sem * sem)77 static inline bool handle_poll_events(struct k_sem *sem)
78 {
79 #ifdef CONFIG_POLL
80 	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
81 	return true;
82 #else
83 	ARG_UNUSED(sem);
84 	return false;
85 #endif
86 }
87 
z_impl_k_sem_give(struct k_sem * sem)88 void z_impl_k_sem_give(struct k_sem *sem)
89 {
90 	k_spinlock_key_t key = k_spin_lock(&lock);
91 	struct k_thread *thread;
92 	bool resched = true;
93 
94 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem);
95 
96 	thread = z_unpend_first_thread(&sem->wait_q);
97 
98 	if (thread != NULL) {
99 		arch_thread_return_value_set(thread, 0);
100 		z_ready_thread(thread);
101 	} else {
102 		sem->count += (sem->count != sem->limit) ? 1U : 0U;
103 		resched = handle_poll_events(sem);
104 	}
105 
106 	if (resched) {
107 		z_reschedule(&lock, key);
108 	} else {
109 		k_spin_unlock(&lock, key);
110 	}
111 
112 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem);
113 }
114 
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: validate the object, then defer to
 * the implementation.
 */
static inline void z_vrfy_k_sem_give(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_give(sem);
}
#include <syscalls/k_sem_give_mrsh.c>
#endif

/* Take the semaphore, pending for up to @a timeout when the count is
 * zero.
 *
 * Returns 0 on success, -EBUSY when unavailable and K_NO_WAIT was
 * given, or the z_pend_curr() result otherwise (e.g. -EAGAIN on
 * timeout or wakeup by k_sem_reset()).
 */
int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	int ret = 0;

	/* ISRs may only poll (K_NO_WAIT); they cannot block */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout);

	/* Fast path: count is available, consume one unit and return */
	if (likely(sem->count > 0U)) {
		sem->count--;
		k_spin_unlock(&lock, key);
		ret = 0;
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		ret = -EBUSY;
		goto out;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout);

	/* Pend on the wait queue; this releases the lock. The return
	 * value is set by k_sem_give()/k_sem_reset() or by the timeout
	 * machinery.
	 */
	ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret);

	return ret;
}

z_impl_k_sem_reset(struct k_sem * sem)158 void z_impl_k_sem_reset(struct k_sem *sem)
159 {
160 	struct k_thread *thread;
161 	k_spinlock_key_t key = k_spin_lock(&lock);
162 
163 	while (true) {
164 		thread = z_unpend_first_thread(&sem->wait_q);
165 		if (thread == NULL) {
166 			break;
167 		}
168 		arch_thread_return_value_set(thread, -EAGAIN);
169 		z_ready_thread(thread);
170 	}
171 	sem->count = 0;
172 
173 	SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem);
174 
175 	handle_poll_events(sem);
176 
177 	z_reschedule(&lock, key);
178 }
179 
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: validate the object, then defer to
 * the implementation.
 */
static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_take((struct k_sem *)sem, timeout);
}
#include <syscalls/k_sem_take_mrsh.c>

/* Syscall verification handler: validate the object, then defer to
 * the implementation.
 */
static inline void z_vrfy_k_sem_reset(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	z_impl_k_sem_reset(sem);
}
#include <syscalls/k_sem_reset_mrsh.c>

/* Syscall verification handler: validate the object, then defer to
 * the implementation.
 */
static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
{
	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
	return z_impl_k_sem_count_get(sem);
}
#include <syscalls/k_sem_count_get_mrsh.c>

#endif
