/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>

struct alloc_node {
	sys_sfnode_t node;
	void *data;
};
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
	void *ret;

	if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
		/* If the flag is set, then the enqueue operation for this item
		 * did a behind-the-scenes memory allocation of an alloc_node
		 * struct, which is what got put in the queue. Free it and pass
		 * back the data pointer.
		 */
		struct alloc_node *anode;

		anode = CONTAINER_OF(node, struct alloc_node, node);
		ret = anode->data;
		if (needs_free) {
			k_free(anode);
		}
	} else {
		/* Data was directly placed in the queue, with the first word
		 * reserved for the linked list. User mode isn't allowed to
		 * enqueue this way, although it can receive data sent this
		 * way.
		 */
		ret = (void *)node;
	}

	return ret;
}
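
/* Illustrative sketch, not part of the kernel: an item enqueued without
 * allocation must reserve its first word for the queue's linked-list
 * pointer, which is why z_queue_node_peek() can return the node pointer
 * itself in the non-allocated case. Items added via the k_queue_alloc_*()
 * APIs need no reserved word, since the kernel wraps them in an alloc_node
 * with the flag bit set. "example_item" is a hypothetical layout.
 */
struct example_item {
	void *reserved;		/* first word owned by the queue while queued */
	uint32_t payload;	/* user data follows the reserved word */
};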

void z_impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	queue->lock = (struct k_spinlock) {};
	z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_queue, queue);

	k_object_init(queue);
}
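
/* Illustrative sketch, not part of the kernel: a queue may be defined
 * statically with K_QUEUE_DEFINE() or initialized at run time with
 * k_queue_init(). The names "my_static_queue", "my_runtime_queue", and
 * "queue_init_example" are hypothetical.
 */
K_QUEUE_DEFINE(my_static_queue);

__unused static void queue_init_example(void)
{
	static struct k_queue my_runtime_queue;

	k_queue_init(&my_runtime_queue);
}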

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
	z_impl_k_queue_init(queue);
}
#include <syscalls/k_queue_init_mrsh.c>
#endif

static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	z_thread_return_value_set_with_data(thread, 0, data);
	z_ready_thread(thread);
}

static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
	z_handle_obj_poll_events(&queue->poll_events, state);
#else
	ARG_UNUSED(queue);
	ARG_UNUSED(state);
#endif
}

void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue);

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *first_pending_thread;

	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		prepare_thread_to_run(first_pending_thread, NULL);
	}

	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
	z_reschedule(&queue->lock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	z_impl_k_queue_cancel_wait(queue);
}
#include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif

static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
			    bool alloc, bool is_append)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key = k_spin_lock(&queue->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc);

	if (is_append) {
		prev = sys_sflist_peek_tail(&queue->data_q);
	}
	first_pending_thread = z_unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

		prepare_thread_to_run(first_pending_thread, data);
		z_reschedule(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

		return 0;
	}

	/* Only need to actually allocate if no threads are pending */
	if (alloc) {
		struct alloc_node *anode;

		anode = z_thread_malloc(sizeof(*anode));
		if (anode == NULL) {
			k_spin_unlock(&queue->lock, key);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc,
						       -ENOMEM);

			return -ENOMEM;
		}
		anode->data = data;
		sys_sfnode_init(&anode->node, 0x1);
		data = anode;
	} else {
		sys_sfnode_init(data, 0x0);
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

	sys_sflist_insert(&queue->data_q, prev, data);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

	return 0;
}

void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue);

	(void)queue_insert(queue, prev, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue);
}

void k_queue_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue);

	(void)queue_insert(queue, NULL, data, false, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue);

	(void)queue_insert(queue, NULL, data, false, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue);
}
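
/* Illustrative sketch, not part of the kernel: a producer enqueueing an
 * item whose first word is reserved for the queue. The item must remain
 * valid (static here) until a consumer dequeues it. "produced_msg" and
 * "producer_example" are hypothetical names.
 */
struct produced_msg {
	void *reserved;		/* first word owned by the queue */
	int value;
};

__unused static void producer_example(struct k_queue *q)
{
	static struct produced_msg msg = { .value = 42 };

	k_queue_append(q, &msg);	/* add at the tail */
	/* k_queue_prepend(q, &msg) would add at the head instead */
}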

int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, true);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret);

	return ret;
}
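
/* Illustrative sketch, not part of the kernel: the k_queue_alloc_*() APIs
 * accept a pointer to arbitrary data with no reserved first word. The
 * alloc_node wrapper comes from the calling thread's resource pool, so the
 * call can fail with -ENOMEM. "alloc_producer_example" is a hypothetical
 * name.
 */
__unused static int32_t alloc_producer_example(struct k_queue *q, void *payload)
{
	int32_t ret = k_queue_alloc_append(q, payload);

	if (ret == -ENOMEM) {
		/* the thread's resource pool is exhausted or missing */
	}
	return ret;
}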

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
						  void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_append(queue, data);
}
#include <syscalls/k_queue_alloc_append_mrsh.c>
#endif

int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue);

	int32_t ret = queue_insert(queue, NULL, data, true, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
						   void *data)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <syscalls/k_queue_alloc_prepend_mrsh.c>
#endif

int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

	/* invalid head or tail of list */
	CHECKIF(head == NULL || tail == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	struct k_thread *thread = NULL;

	if (head != NULL) {
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	while ((head != NULL) && (thread != NULL)) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
		thread = z_unpend_first_thread(&queue->wait_q);
	}

	if (head != NULL) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);

	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
	z_reschedule(&queue->lock, key);
	return 0;
}
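
/* Illustrative sketch, not part of the kernel: k_queue_append_list()
 * splices a NULL-terminated chain of items in one call; each item's first
 * word is its "next" pointer, and the nodes must be word-aligned so their
 * sflist flag bits read as zero. "chain_item" and "append_list_example"
 * are hypothetical names.
 */
struct chain_item {
	struct chain_item *next;	/* first word: the link */
	int value;
};

__unused static void append_list_example(struct k_queue *q)
{
	static struct chain_item a, b, c;

	a.next = &b;
	b.next = &c;
	c.next = NULL;
	(void)k_queue_append_list(q, &a, &c);	/* head = &a, tail = &c */
}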

int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue);

	/* list must not be empty */
	CHECKIF(sys_slist_is_empty(list)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL);

		return -EINVAL;
	}

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 * - the sflist implementation only differs from slist by stuffing
	 *   flag bits into the low-order bits of the data pointer
	 * - the source list is really an slist, not an sflist with flags set
	 */
	ret = k_queue_append_list(queue, list->head, list->tail);
	CHECKIF(ret != 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret);

		return ret;
	}
	sys_slist_init(list);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0);

	return 0;
}
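
/* Illustrative sketch, not part of the kernel: a list built with the
 * standard sys_slist API can be merged wholesale; the queue takes over the
 * nodes and the source list is reinitialized to empty. The sys_snode_t
 * must be the first field so the slist and sflist layouts line up.
 * "slist_item" and "merge_example" are hypothetical names.
 */
struct slist_item {
	sys_snode_t node;	/* must be the first field */
	int value;
};

__unused static void merge_example(struct k_queue *q)
{
	static struct slist_item x, y;
	sys_slist_t list;

	sys_slist_init(&list);
	sys_slist_append(&list, &x.node);
	sys_slist_append(&list, &y.node);
	(void)k_queue_merge_slist(q, &list);	/* "list" is now empty */
}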

void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&queue->lock);
	void *data;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout);

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		data = z_queue_node_peek(node, true);
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data);

		return data;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout);

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&queue->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL);

		return NULL;
	}

	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
				       (ret != 0) ? NULL : _current->base.swap_data);

	return (ret != 0) ? NULL : _current->base.swap_data;
}
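
/* Illustrative sketch, not part of the kernel: a consumer blocking for up
 * to 100 ms. A NULL return means the wait timed out or was cancelled, so
 * the result must be checked before use. "consumer_example" is a
 * hypothetical name.
 */
__unused static void consumer_example(struct k_queue *q)
{
	void *data = k_queue_get(q, K_MSEC(100));

	if (data == NULL) {
		return;	/* timed out or the wait was cancelled */
	}
	/* ... process data ... */
}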

bool k_queue_remove(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue);

	bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret);

	return ret;
}

bool k_queue_unique_append(struct k_queue *queue, void *data)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue);

	sys_sfnode_t *test;

	SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
		if (test == (sys_sfnode_t *)data) {
			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false);

			return false;
		}
	}

	k_queue_append(queue, data);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true);

	return true;
}

void *z_impl_k_queue_peek_head(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret);

	return ret;
}

void *z_impl_k_queue_peek_tail(struct k_queue *queue)
{
	void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);

	SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
				       k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_get(queue, timeout);
}
#include <syscalls/k_queue_get_mrsh.c>

static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_is_empty(queue);
}
#include <syscalls/k_queue_is_empty_mrsh.c>

static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_head(queue);
}
#include <syscalls/k_queue_peek_head_mrsh.c>

static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
	K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_tail(queue);
}
#include <syscalls/k_queue_peek_tail_mrsh.c>

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_FIFO
struct k_obj_type _obj_type_fifo;

static int init_fifo_obj_core_list(void)
{
	/* Initialize fifo object type */

	z_obj_type_init(&_obj_type_fifo, K_OBJ_TYPE_FIFO_ID,
			offsetof(struct k_fifo, obj_core));

	/* Initialize and link statically defined fifos */

	STRUCT_SECTION_FOREACH(k_fifo, fifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(fifo), &_obj_type_fifo);
	}

	return 0;
}

SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo;

static int init_lifo_obj_core_list(void)
{
	/* Initialize lifo object type */

	z_obj_type_init(&_obj_type_lifo, K_OBJ_TYPE_LIFO_ID,
			offsetof(struct k_lifo, obj_core));

	/* Initialize and link statically defined lifos */

	STRUCT_SECTION_FOREACH(k_lifo, lifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(lifo), &_obj_type_lifo);
	}

	return 0;
}

SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif