/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief fixed-size stack object
 */
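
/*
 * Illustrative usage of the k_stack API implemented below (a minimal
 * sketch; the buffer size and the value pushed are arbitrary examples,
 * not taken from this file):
 *
 *	stack_data_t buf[8];
 *	struct k_stack my_stack;
 *
 *	k_stack_init(&my_stack, buf, ARRAY_SIZE(buf));
 *	(void)k_stack_push(&my_stack, (stack_data_t)42);
 *
 *	stack_data_t val;
 *	(void)k_stack_pop(&my_stack, &val, K_FOREVER);
 */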

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/sys/check.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>

#ifdef CONFIG_OBJ_CORE_STACK
static struct k_obj_type obj_type_stack;
#endif

void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
		  uint32_t num_entries)
{
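	/*
	 * The caller supplies the backing buffer: next/base point at its
	 * first slot and top marks one slot past the end (base + num_entries).
	 */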
	z_waitq_init(&stack->wait_q);
	stack->lock = (struct k_spinlock) {};
	stack->next = stack->base = buffer;
	stack->top = stack->base + num_entries;

	SYS_PORT_TRACING_OBJ_INIT(k_stack, stack);
	k_object_init(stack);

#ifdef CONFIG_OBJ_CORE_STACK
	k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
#endif
}

int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
{
	void *buffer;
	int32_t ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, alloc_init, stack);

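	/* Allocate the backing buffer from the calling thread's resource pool */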
	buffer = z_thread_malloc(num_entries * sizeof(stack_data_t));
	if (buffer != NULL) {
		k_stack_init(stack, buffer, num_entries);
		stack->flags = K_STACK_FLAG_ALLOC;
		ret = (int32_t)0;
	} else {
		ret = -ENOMEM;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, alloc_init, stack, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
						uint32_t num_entries)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
	K_OOPS(K_SYSCALL_VERIFY(num_entries > 0));
	return z_impl_k_stack_alloc_init(stack, num_entries);
}
#include <syscalls/k_stack_alloc_init_mrsh.c>
#endif

int k_stack_cleanup(struct k_stack *stack)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, cleanup, stack);

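	/* Refuse to clean up while any thread is still waiting to pop */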
	CHECKIF(z_waitq_head(&stack->wait_q) != NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, cleanup, stack, -EAGAIN);

		return -EAGAIN;
	}

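	/* Only free the buffer if it was allocated by k_stack_alloc_init() */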
	if ((stack->flags & K_STACK_FLAG_ALLOC) != (uint8_t)0) {
		k_free(stack->base);
		stack->base = NULL;
		stack->flags &= ~K_STACK_FLAG_ALLOC;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, cleanup, stack, 0);

	return 0;
}

int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
{
	struct k_thread *first_pending_thread;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&stack->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, push, stack);

	CHECKIF(stack->next == stack->top) {
		ret = -ENOMEM;
		goto out;
	}

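	/*
	 * If a thread is already pending in k_stack_pop(), hand the value
	 * directly to it instead of storing it in the buffer.
	 */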
	first_pending_thread = z_unpend_first_thread(&stack->wait_q);

	if (first_pending_thread != NULL) {
		z_thread_return_value_set_with_data(first_pending_thread,
						    0, (void *)data);

		z_ready_thread(first_pending_thread);
		z_reschedule(&stack->lock, key);
		goto end;
	} else {
		*(stack->next) = data;
		stack->next++;
		goto out;
	}

out:
	k_spin_unlock(&stack->lock, key);

end:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, push, stack, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
{
	K_OOPS(K_SYSCALL_OBJ(stack, K_OBJ_STACK));

	return z_impl_k_stack_push(stack, data);
}
#include <syscalls/k_stack_push_mrsh.c>
#endif

int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
		       k_timeout_t timeout)
{
	k_spinlock_key_t key;
	int result;

	key = k_spin_lock(&stack->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, pop, stack, timeout);

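	/* Fast path: at least one entry is available, pop it without waiting */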
	if (likely(stack->next > stack->base)) {
		stack->next--;
		*data = *(stack->next);
		k_spin_unlock(&stack->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);

		return 0;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_stack, pop, stack, timeout);

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&stack->lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, -EBUSY);

		return -EBUSY;
	}

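	/* Block until a push hands us a value or the timeout expires */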
	result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout);
	if (result == -EAGAIN) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, -EAGAIN);

		return -EAGAIN;
	}

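	/*
	 * The pushing thread stored the popped value in our swap_data via
	 * z_thread_return_value_set_with_data() (see z_impl_k_stack_push()).
	 */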
	*data = (stack_data_t)_current->base.swap_data;

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);

	return 0;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
				     stack_data_t *data, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(stack, K_OBJ_STACK));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));
	return z_impl_k_stack_pop(stack, data, timeout);
}
#include <syscalls/k_stack_pop_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_STACK
static int init_stack_obj_core_list(void)
{
	/* Initialize stack object type */

	z_obj_type_init(&obj_type_stack, K_OBJ_TYPE_STACK_ID,
			offsetof(struct k_stack, obj_core));

	/* Initialize and link statically defined stacks */

	STRUCT_SECTION_FOREACH(k_stack, stack) {
		k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
	}

	return 0;
}

SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif