/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "posix_internal.h"

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/posix/pthread.h>
#include <zephyr/sys/bitarray.h>

#define CONCURRENT_READER_LIMIT (CONFIG_POSIX_THREAD_THREADS_MAX + 1)

struct posix_rwlock {
	struct k_sem rd_sem;
	struct k_sem wr_sem;
	struct k_sem reader_active; /* blocks writers while any reader holds the lock */
	k_tid_t wr_owner;
};

struct posix_rwlockattr {
	bool initialized: 1;
	bool pshared: 1;
};

int64_t timespec_to_timeoutms(const struct timespec *abstime);
static uint32_t read_lock_acquire(struct posix_rwlock *rwl, int32_t timeout);
static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout);

LOG_MODULE_REGISTER(pthread_rwlock, CONFIG_PTHREAD_RWLOCK_LOG_LEVEL);

static struct k_spinlock posix_rwlock_spinlock;

static struct posix_rwlock posix_rwlock_pool[CONFIG_MAX_PTHREAD_RWLOCK_COUNT];
SYS_BITARRAY_DEFINE_STATIC(posix_rwlock_bitarray, CONFIG_MAX_PTHREAD_RWLOCK_COUNT);

/*
 * We reserve the MSB to mark a pthread_rwlock_t as initialized (from the
 * perspective of the application). With a linear space, this means that
 * the theoretical pthread_rwlock_t range is [0,2147483647].
 */
BUILD_ASSERT(CONFIG_MAX_PTHREAD_RWLOCK_COUNT < PTHREAD_OBJ_MASK_INIT,
	     "CONFIG_MAX_PTHREAD_RWLOCK_COUNT is too high");

static inline size_t posix_rwlock_to_offset(struct posix_rwlock *rwl)
{
	return rwl - posix_rwlock_pool;
}

static inline size_t to_posix_rwlock_idx(pthread_rwlock_t rwlock)
{
	return mark_pthread_obj_uninitialized(rwlock);
}

static struct posix_rwlock *get_posix_rwlock(pthread_rwlock_t rwlock)
{
	int actually_initialized;
	size_t bit = to_posix_rwlock_idx(rwlock);

	/* if the provided rwlock does not claim to be initialized, it is invalid */
	if (!is_pthread_obj_initialized(rwlock)) {
		LOG_DBG("RWlock is uninitialized (%x)", rwlock);
		return NULL;
	}

	/* Check that the underlying pool slot has actually been allocated */
	if (sys_bitarray_test_bit(&posix_rwlock_bitarray, bit, &actually_initialized) < 0) {
		LOG_DBG("RWlock is invalid (%x)", rwlock);
		return NULL;
	}

	if (actually_initialized == 0) {
		/* The rwlock claims to be initialized but is actually not */
		LOG_DBG("RWlock claims to be initialized (%x)", rwlock);
		return NULL;
	}

	return &posix_rwlock_pool[bit];
}

struct posix_rwlock *to_posix_rwlock(pthread_rwlock_t *rwlock)
{
	size_t bit;
	struct posix_rwlock *rwl;

	if (*rwlock != PTHREAD_RWLOCK_INITIALIZER) {
		return get_posix_rwlock(*rwlock);
	}

	/* Try to automatically associate a posix_rwlock */
	if (sys_bitarray_alloc(&posix_rwlock_bitarray, 1, &bit) < 0) {
		LOG_DBG("Unable to allocate pthread_rwlock_t");
		return NULL;
	}

	/* Record the allocated slot in the descriptor and mark it as initialized */
	*rwlock = mark_pthread_obj_initialized(bit);

	/* Hand back the backing posix_rwlock */
	rwl = &posix_rwlock_pool[bit];

	return rwl;
}

/**
 * @brief Initialize read-write lock object.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_init(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr)
{
	struct posix_rwlock *rwl;

	ARG_UNUSED(attr);
	*rwlock = PTHREAD_RWLOCK_INITIALIZER;

	rwl = to_posix_rwlock(rwlock);
	if (rwl == NULL) {
		return ENOMEM;
	}

	k_sem_init(&rwl->rd_sem, CONCURRENT_READER_LIMIT, CONCURRENT_READER_LIMIT);
	k_sem_init(&rwl->wr_sem, 1, 1);
	k_sem_init(&rwl->reader_active, 1, 1);
	rwl->wr_owner = NULL;

	LOG_DBG("Initialized rwlock %p", rwl);

	return 0;
}
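
/*
 * Illustrative usage sketch (not part of this translation unit). In this
 * implementation a pthread_rwlock_t must go through pthread_rwlock_init()
 * before it is passed to any of the lock/unlock calls below:
 *
 *   pthread_rwlock_t lock;
 *   int shared_value;
 *
 *   void setup(void)
 *   {
 *           (void)pthread_rwlock_init(&lock, NULL);
 *   }
 *
 *   int reader(void)
 *   {
 *           int v;
 *
 *           (void)pthread_rwlock_rdlock(&lock);
 *           v = shared_value;
 *           (void)pthread_rwlock_unlock(&lock);
 *
 *           return v;
 *   }
 *
 *   void writer(int v)
 *   {
 *           (void)pthread_rwlock_wrlock(&lock);
 *           shared_value = v;
 *           (void)pthread_rwlock_unlock(&lock);
 *   }
 */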

/**
 * @brief Destroy read-write lock object.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	int ret = 0;
	int err;
	size_t bit;
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	K_SPINLOCK(&posix_rwlock_spinlock) {
		if (rwl->wr_owner != NULL) {
			ret = EBUSY;
			K_SPINLOCK_BREAK;
		}

		bit = posix_rwlock_to_offset(rwl);
		err = sys_bitarray_free(&posix_rwlock_bitarray, 1, bit);
		__ASSERT_NO_MSG(err == 0);
	}

	return ret;
}

/**
 * @brief Lock a read-write lock object for reading.
 *
 * API behaviour is unpredictable if the number of concurrent read
 * locks held exceeds CONCURRENT_READER_LIMIT.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	return read_lock_acquire(rwl, SYS_FOREVER_MS);
}

/**
 * @brief Lock a read-write lock object for reading within a specific time.
 *
 * API behaviour is unpredictable if the number of concurrent read
 * locks held exceeds CONCURRENT_READER_LIMIT.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
			       const struct timespec *abstime)
{
	int32_t timeout;
	uint32_t ret = 0U;
	struct posix_rwlock *rwl;

	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
		return EINVAL;
	}

	timeout = (int32_t) timespec_to_timeoutms(abstime);

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	if (read_lock_acquire(rwl, timeout) != 0U) {
		ret = ETIMEDOUT;
	}

	return ret;
}
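
/*
 * Illustrative sketch of building the abstime argument (an assumption: per
 * IEEE 1003.1 the absolute timeout is measured against CLOCK_REALTIME; the
 * actual reference clock is whatever timespec_to_timeoutms(), defined
 * elsewhere, converts from):
 *
 *   struct timespec abstime;
 *
 *   (void)clock_gettime(CLOCK_REALTIME, &abstime);
 *   abstime.tv_sec += 1;    // give up roughly one second from now
 *
 *   int err = pthread_rwlock_timedrdlock(&lock, &abstime);
 *   if (err == ETIMEDOUT) {
 *           // no read lock could be acquired before abstime
 *   }
 */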

/**
 * @brief Try to lock a read-write lock object for reading without blocking.
 *
 * API behaviour is unpredictable if the number of concurrent read
 * locks held exceeds CONCURRENT_READER_LIMIT.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	return read_lock_acquire(rwl, 0);
}

/**
 * @brief Lock a read-write lock object for writing.
 *
 * A write lock does not take priority over read locks; threads acquire
 * the lock based on their thread priority.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	return write_lock_acquire(rwl, SYS_FOREVER_MS);
}

/**
 * @brief Lock a read-write lock object for writing within a specific time.
 *
 * A write lock does not take priority over read locks; threads acquire
 * the lock based on their thread priority.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
			       const struct timespec *abstime)
{
	int32_t timeout;
	uint32_t ret = 0U;
	struct posix_rwlock *rwl;

	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
		return EINVAL;
	}

	timeout = (int32_t) timespec_to_timeoutms(abstime);

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	if (write_lock_acquire(rwl, timeout) != 0U) {
		ret = ETIMEDOUT;
	}

	return ret;
}

/**
 * @brief Try to lock a read-write lock object for writing without blocking.
 *
 * A write lock does not take priority over read locks; threads acquire
 * the lock based on their thread priority.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	return write_lock_acquire(rwl, 0);
}
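
/*
 * Illustrative non-blocking writer sketch (not part of this translation
 * unit); write_lock_acquire() is called with a zero timeout, so contention is
 * reported as EBUSY instead of blocking:
 *
 *   int err = pthread_rwlock_trywrlock(&lock);
 *
 *   if (err == 0) {
 *           shared_value = 42;
 *           (void)pthread_rwlock_unlock(&lock);
 *   } else if (err == EBUSY) {
 *           // a reader or another writer currently holds the lock
 *   }
 */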

/**
 *
 * @brief Unlock a read-write lock object.
 *
 * See IEEE 1003.1
 */
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct posix_rwlock *rwl;

	rwl = get_posix_rwlock(*rwlock);
	if (rwl == NULL) {
		return EINVAL;
	}

	if (k_current_get() == rwl->wr_owner) {
		/* Write unlock */
		rwl->wr_owner = NULL;
		k_sem_give(&rwl->reader_active);
		k_sem_give(&rwl->wr_sem);
	} else {
		/* Read unlock */
		k_sem_give(&rwl->rd_sem);

		if (k_sem_count_get(&rwl->rd_sem) == CONCURRENT_READER_LIMIT) {
			/* Last read lock, unlock writer */
			k_sem_give(&rwl->reader_active);
		}
	}
	return 0;
}

static uint32_t read_lock_acquire(struct posix_rwlock *rwl, int32_t timeout)
{
	uint32_t ret = 0U;

	/*
	 * wr_sem is held only briefly by readers but for the full critical
	 * section by writers, so taking it here blocks new readers while a
	 * writer holds (or is waiting for) the lock.
	 */
	if (k_sem_take(&rwl->wr_sem, SYS_TIMEOUT_MS(timeout)) == 0) {
		/*
		 * The first reader takes reader_active to block writers; for
		 * subsequent readers the K_NO_WAIT take simply fails, which
		 * is fine. rd_sem counts the readers currently holding the
		 * lock.
		 */
		k_sem_take(&rwl->reader_active, K_NO_WAIT);
		k_sem_take(&rwl->rd_sem, K_NO_WAIT);
		k_sem_give(&rwl->wr_sem);
	} else {
		ret = EBUSY;
	}

	return ret;
}

static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout)
{
	uint32_t ret = 0U;
	int64_t elapsed_time, st_time = k_uptime_get();
	k_timeout_t k_timeout;

	k_timeout = SYS_TIMEOUT_MS(timeout);

	/* waiting for release of write lock */
	if (k_sem_take(&rwl->wr_sem, k_timeout) == 0) {
		/* update remaining timeout time for 2nd sem */
		if (timeout != SYS_FOREVER_MS) {
			elapsed_time = k_uptime_get() - st_time;
			timeout = timeout <= elapsed_time ? 0 : timeout - elapsed_time;
		}

		k_timeout = SYS_TIMEOUT_MS(timeout);

		/* waiting for reader to complete operation */
		if (k_sem_take(&rwl->reader_active, k_timeout) == 0) {
			rwl->wr_owner = k_current_get();
		} else {
			k_sem_give(&rwl->wr_sem);
			ret = EBUSY;
		}

	} else {
		ret = EBUSY;
	}
	return ret;
}
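
/*
 * Worked example of the semaphore scheme above (a sketch; "LIMIT" stands for
 * CONCURRENT_READER_LIMIT):
 *
 *   reader A rdlock:  take wr_sem -> take reader_active -> take rd_sem
 *                     -> give wr_sem                 (rd_sem count: LIMIT - 1)
 *   reader B rdlock:  as above, except reader_active is already taken and the
 *                     K_NO_WAIT take fails harmlessly (rd_sem count: LIMIT - 2)
 *   writer W wrlock:  takes wr_sem, so no new readers can enter; then blocks
 *                     on reader_active until the last reader unlocks
 *   reader A unlock:  give rd_sem                    (rd_sem count: LIMIT - 1)
 *   reader B unlock:  give rd_sem (count back at LIMIT), so the last reader
 *                     gives reader_active and writer W proceeds, recording
 *                     itself as wr_owner
 */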

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *ZRESTRICT attr,
				  int *ZRESTRICT pshared)
{
	struct posix_rwlockattr *const a = (struct posix_rwlockattr *)attr;

	if (a == NULL || !a->initialized) {
		return EINVAL;
	}

	*pshared = a->pshared;

	return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
	struct posix_rwlockattr *const a = (struct posix_rwlockattr *)attr;

	if (a == NULL || !a->initialized) {
		return EINVAL;
	}

	if (!(pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED)) {
		return EINVAL;
	}

	a->pshared = pshared;

	return 0;
}

int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	struct posix_rwlockattr *const a = (struct posix_rwlockattr *)attr;

	if (a == NULL) {
		return EINVAL;
	}

	*a = (struct posix_rwlockattr){
		.initialized = true,
		.pshared = PTHREAD_PROCESS_PRIVATE,
	};

	return 0;
}
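
/*
 * Illustrative attribute usage sketch (not part of this translation unit);
 * note that pthread_rwlock_init() above currently ignores the attribute:
 *
 *   pthread_rwlockattr_t attr;
 *   pthread_rwlock_t lock;
 *
 *   (void)pthread_rwlockattr_init(&attr);
 *   (void)pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
 *   (void)pthread_rwlock_init(&lock, &attr);
 *   (void)pthread_rwlockattr_destroy(&attr);
 */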

int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	struct posix_rwlockattr *const a = (struct posix_rwlockattr *)attr;

	if (a == NULL || !a->initialized) {
		return EINVAL;
	}

	*a = (struct posix_rwlockattr){0};

	return 0;
}