/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>

struct alloc_node {
	sys_sfnode_t node;
	void *data;
};

void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
	void *ret;

	if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
		/* If the flag is set, then the enqueue operation for this item
		 * did a behind-the-scenes memory allocation of an alloc_node
		 * struct, which is what got put in the queue. Free it and pass
		 * back the data pointer.
		 */
		struct alloc_node *anode;

		anode = CONTAINER_OF(node, struct alloc_node, node);
		ret = anode->data;
		if (needs_free) {
			k_free(anode);
		}
	} else {
		/* Data was placed directly in the queue, with the first word
		 * reserved for the linked list. User mode isn't allowed to
		 * do this, although it can get data sent this way.
		 */
		ret = (void *)node;
	}

	return ret;
}
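
/*
 * Editor's note, a minimal sketch of the two enqueue schemes the function
 * above distinguishes (hypothetical item type, not part of this file).
 * With the non-allocating calls, the caller's item must reserve its first
 * word, which the queue uses as the sys_sfnode_t:
 *
 *	struct my_item {
 *		void *reserved;     first word, owned by the queue
 *		int payload;
 *	};
 *
 * With the k_queue_alloc_* calls, queue_insert() below heap-allocates an
 * alloc_node wrapper and tags it with flag 0x1, so any pointer may be
 * queued; z_queue_node_peek() then frees the wrapper when needs_free is
 * true and returns the original data pointer.
 */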

void z_impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	queue->lock = (struct k_spinlock) {};
	z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_queue, queue);

	k_object_init(queue);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
	z_impl_k_queue_init(queue);
}
#include <zephyr/syscalls/k_queue_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
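
/*
 * Editor's note, usage sketch (illustrative only): callers may initialize
 * a queue at run time as above, or define one statically with
 * K_QUEUE_DEFINE(), which performs the equivalent setup at build time:
 *
 *	struct k_queue my_q;
 *
 *	k_queue_init(&my_q);
 *
 *	K_QUEUE_DEFINE(my_static_q);
 */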

static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	z_thread_return_value_set_with_data(thread, 0, data);
	z_ready_thread(thread);
}

static inline bool handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
	return z_handle_obj_poll_events(&queue->poll_events, state);
#else
	ARG_UNUSED(queue);
	ARG_UNUSED(state);

	return false;
#endif /* CONFIG_POLL */
}

void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue);

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *first_pending_thread;
	bool resched = false;

	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		resched = true;
		prepare_thread_to_run(first_pending_thread, NULL);
	}

	resched = handle_poll_events(queue, K_POLL_STATE_CANCELLED) || resched;

	if (resched) {
		z_reschedule(&queue->lock, key);
	} else {
		k_spin_unlock(&queue->lock, key);
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	z_impl_k_queue_cancel_wait(queue);
}
#include <zephyr/syscalls/k_queue_cancel_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
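
/*
 * Editor's note, usage sketch (illustrative only): k_queue_cancel_wait()
 * lets a producer release one blocked consumer without handing it data;
 * the consumer's k_queue_get() then returns NULL:
 *
 *	producer:	k_queue_cancel_wait(&my_q);
 *
 *	consumer:	void *item = k_queue_get(&my_q, K_FOREVER);
 *			if (item == NULL) {
 *				the wait was cancelled
 *			}
 */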

static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
			    bool alloc, bool is_append)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	int32_t result = 0;
	bool resched = false;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc);

	if (is_append) {
		prev = sys_sflist_peek_tail(&queue->data_q);
	}
	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (unlikely(first_pending_thread != NULL)) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

		prepare_thread_to_run(first_pending_thread, data);
		resched = true;
		goto out;
	}

	/* Only need to actually allocate if no threads are pending */
	if (alloc) {
		struct alloc_node *anode;

		anode = z_thread_malloc(sizeof(*anode));
		if (anode == NULL) {
			result = -ENOMEM;
			goto out;
		}
		anode->data = data;
		sys_sfnode_init(&anode->node, 0x1);
		data = anode;
	} else {
		sys_sfnode_init(data, 0x0);
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

	sys_sflist_insert(&queue->data_q, prev, data);
	resched = handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);

out:
	if (resched) {
		z_reschedule(&queue->lock, key);
	} else {
		k_spin_unlock(&queue->lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, result);

	return result;
}

void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue);

	(void)queue_insert(queue, prev, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue);
}

void k_queue_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue);

	(void)queue_insert(queue, NULL, data, false, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue);

	(void)queue_insert(queue, NULL, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue);
}
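
/*
 * Editor's note, usage sketch (illustrative, using the hypothetical
 * struct my_item from the note above): the non-allocating calls link the
 * caller's memory directly into the queue, so the item must remain valid
 * until dequeued, and its first word is overwritten:
 *
 *	static struct my_item m = { .payload = 42 };
 *
 *	k_queue_append(&my_q, &m);		to the tail
 *	k_queue_prepend(&my_q, &m);		or to the head
 *	k_queue_insert(&my_q, prev, &m);	or after node 'prev'
 */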

int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
						  void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_append(queue, data);
}
#include <zephyr/syscalls/k_queue_alloc_append_mrsh.c>
#endif /* CONFIG_USERSPACE */

int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
						   void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <zephyr/syscalls/k_queue_alloc_prepend_mrsh.c>
#endif /* CONFIG_USERSPACE */
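
/*
 * Editor's note, usage sketch (illustrative only): the alloc_ variants
 * accept any pointer, with no reserved first word, at the cost of a heap
 * allocation that can fail:
 *
 *	int err = k_queue_alloc_append(&my_q, payload_ptr);
 *
 *	if (err == -ENOMEM) {
 *		no memory in the calling thread's resource pool or heap
 *	}
 */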

int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);
	bool resched = false;

	/* invalid head or tail of list */
	CHECKIF((head == NULL) || (tail == NULL)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *thread = NULL;

	if (head != NULL) {
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	while ((head != NULL) && (thread != NULL)) {
		resched = true;
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	if (head != NULL) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);

	resched = handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE) || resched;

	if (resched) {
		z_reschedule(&queue->lock, key);
	} else {
		k_spin_unlock(&queue->lock, key);
	}

	return 0;
}
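
/*
 * Editor's note, usage sketch (illustrative, hypothetical items a, b, c
 * of struct my_item): the head..tail chain must be singly linked through
 * each item's first word and NULL-terminated, matching the
 * "head = *(void **)head" traversal above:
 *
 *	a.reserved = &b;
 *	b.reserved = &c;
 *	c.reserved = NULL;
 *
 *	(void)k_queue_append_list(&my_q, &a, &c);
 */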

int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue);

	/* list must not be empty */
	CHECKIF(sys_slist_is_empty(list)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL);

		return -EINVAL;
	}

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 * - sflist implementation only differs from slist by stuffing
	 *   flag bytes in the lower order bits of the data pointer
	 * - source list is really an slist and not an sflist with flags set
	 */
	ret = k_queue_append_list(queue, list->head, list->tail);
	CHECKIF(ret != 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret);

		return ret;
	}
	sys_slist_init(list);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0);

	return 0;
}
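
/*
 * Editor's note, usage sketch (illustrative only): merging drains an
 * ordinary sys_slist_t into the queue in one call and resets the source
 * list to empty:
 *
 *	sys_slist_t list;
 *
 *	sys_slist_init(&list);
 *	sys_slist_append(&list, &item_a.node);
 *	sys_slist_append(&list, &item_b.node);
 *
 *	(void)k_queue_merge_slist(&my_q, &list);
 *
 * where item_a.node and item_b.node are sys_snode_t fields placed first
 * in their containing structs, per the note above.
 */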

void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	void *data;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout);

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		data = z_queue_node_peek(node, true);
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data);

		return data;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout);

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL);

		return NULL;
	}

	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
				       (ret != 0) ? NULL : _current->base.swap_data);

	return (ret != 0) ? NULL : _current->base.swap_data;
}
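
/*
 * Editor's note, usage sketch (illustrative only): a consumer with a
 * bounded wait cannot distinguish timeout from cancellation by the
 * return value alone; both paths above yield NULL:
 *
 *	void *item = k_queue_get(&my_q, K_MSEC(100));
 *
 *	if (item != NULL) {
 *		item points at the enqueued data
 *	}
 */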

bool k_queue_remove(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue);

	bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret);

	return ret;
}

bool k_queue_unique_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue);

	sys_sfnode_t *test;

	SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
		if (test == (sys_sfnode_t *)data) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false);

			return false;
		}
	}

	k_queue_append(queue, data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true);

	return true;
}
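
/*
 * Editor's note, usage sketch (illustrative only): unique append avoids
 * double-queuing an item that may already be pending; note that the scan
 * above runs without holding the queue lock:
 *
 *	if (!k_queue_unique_append(&my_q, &m)) {
 *		&m was already queued; nothing was added
 *	}
 */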

void *z_impl_k_queue_peek_head(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret);

	return ret;
}

void *z_impl_k_queue_peek_tail(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret);

	return ret;
}
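
/*
 * Editor's note, usage sketch (illustrative only): peeking returns the
 * data pointer without unlinking the node (needs_free is false, so any
 * alloc_node wrapper stays in place):
 *
 *	void *head = k_queue_peek_head(&my_q);
 *	void *tail = k_queue_peek_tail(&my_q);
 *
 * Either may be NULL when the queue is empty.
 */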

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
				       k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_get(queue, timeout);
}
#include <zephyr/syscalls/k_queue_get_mrsh.c>

static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_is_empty(queue);
}
#include <zephyr/syscalls/k_queue_is_empty_mrsh.c>

static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_head(queue);
}
#include <zephyr/syscalls/k_queue_peek_head_mrsh.c>

static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_tail(queue);
}
#include <zephyr/syscalls/k_queue_peek_tail_mrsh.c>

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_FIFO
struct k_obj_type _obj_type_fifo;

static int init_fifo_obj_core_list(void)
{
	/* Initialize fifo object type */

	z_obj_type_init(&_obj_type_fifo, K_OBJ_TYPE_FIFO_ID,
			offsetof(struct k_fifo, obj_core));

	/* Initialize and link statically defined fifos */

	STRUCT_SECTION_FOREACH(k_fifo, fifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(fifo), &_obj_type_fifo);
	}

	return 0;
}

SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_FIFO */

#ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo;

static int init_lifo_obj_core_list(void)
{
	/* Initialize lifo object type */

	z_obj_type_init(&_obj_type_lifo, K_OBJ_TYPE_LIFO_ID,
			offsetof(struct k_lifo, obj_core));

	/* Initialize and link statically defined lifos */

	STRUCT_SECTION_FOREACH(k_lifo, lifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(lifo), &_obj_type_lifo);
	}

	return 0;
}

SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_LIFO */