1 /*
2 * Copyright (c) 2016 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @brief Mailboxes.
9 */
10
11 #include <kernel.h>
12 #include <kernel_structs.h>
13
14 #include <toolchain.h>
15 #include <linker/sections.h>
16 #include <string.h>
17 #include <ksched.h>
18 #include <wait_q.h>
19 #include <sys/dlist.h>
20 #include <init.h>
21
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/*
 * Asynchronous message descriptor type.
 *
 * The dummy thread object lets a descriptor stand in for a real sending
 * thread on a mailbox wait queue: the rest of this file casts between
 * (struct k_mbox_async *) and (struct k_thread *), so the thread base
 * is placed first in the struct.
 */
struct k_mbox_async {
	struct _thread_base thread;	/* dummy thread object */
	struct k_mbox_msg tx_msg;	/* transmit message descriptor */
};

/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);

/*
 * Allocate an asynchronous message descriptor.
 *
 * Blocks (K_FOREVER) until a descriptor is returned to the pool, so the
 * pop result can safely be ignored.
 */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
	(void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}

/* free an asynchronous message descriptor (return it to the pool) */
static inline void mbox_async_free(struct k_mbox_async *async)
{
	k_stack_push(&async_msg_free, (stack_data_t)async);
}

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
46
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/*
 * Do run-time initialization of mailbox object subsystem.
 *
 * Creates the pool of asynchronous message descriptors.
 *
 * A dummy thread requires minimal initialization, since it never gets
 * to execute. The _THREAD_DUMMY flag is sufficient to distinguish a
 * dummy thread from a real one. The threads are *not* added to the
 * kernel's list of known threads.
 *
 * Once initialized, the address of each descriptor is added to a stack
 * that governs access to them.
 *
 * @param dev Unused (required by the SYS_INIT signature).
 *
 * @return 0 always.
 */
static int init_mbox_module(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* array of asynchronous message descriptors */
	static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

	/*
	 * NOTE: no inner #if guard is needed here; this whole function is
	 * already compiled only when CONFIG_NUM_MBOX_ASYNC_MSGS > 0.
	 */
	int i;

	for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
		z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
		k_stack_push(&async_msg_free, (stack_data_t)&async_msg[i]);
	}

	return 0;
}

SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
88
k_mbox_init(struct k_mbox * mbox)89 void k_mbox_init(struct k_mbox *mbox)
90 {
91 z_waitq_init(&mbox->tx_msg_queue);
92 z_waitq_init(&mbox->rx_msg_queue);
93 mbox->lock = (struct k_spinlock) {};
94
95 SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
96 }
97
98 /**
99 * @brief Check compatibility of sender's and receiver's message descriptors.
100 *
101 * Compares sender's and receiver's message descriptors to see if they are
102 * compatible. If so, the descriptor fields are updated to reflect that a
103 * match has occurred.
104 *
105 * @param tx_msg Pointer to transmit message descriptor.
106 * @param rx_msg Pointer to receive message descriptor.
107 *
108 * @return 0 if successfully matched, otherwise -1.
109 */
mbox_message_match(struct k_mbox_msg * tx_msg,struct k_mbox_msg * rx_msg)110 static int mbox_message_match(struct k_mbox_msg *tx_msg,
111 struct k_mbox_msg *rx_msg)
112 {
113 uint32_t temp_info;
114
115 if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
116 (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
117 ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
118 (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
119
120 /* update thread identifier fields for both descriptors */
121 rx_msg->rx_source_thread = tx_msg->rx_source_thread;
122 tx_msg->tx_target_thread = rx_msg->tx_target_thread;
123
124 /* update application info fields for both descriptors */
125 temp_info = rx_msg->info;
126 rx_msg->info = tx_msg->info;
127 tx_msg->info = temp_info;
128
129 /* update data size field for receiver only */
130 if (rx_msg->size > tx_msg->size) {
131 rx_msg->size = tx_msg->size;
132 }
133
134 /* update data location fields for receiver only */
135 rx_msg->tx_data = tx_msg->tx_data;
136 rx_msg->tx_block = tx_msg->tx_block;
137 if (rx_msg->tx_data != NULL) {
138 rx_msg->tx_block.data = NULL;
139 } else if (rx_msg->tx_block.data != NULL) {
140 rx_msg->tx_data = rx_msg->tx_block.data;
141 } else {
142 /* no data */
143 }
144
145 /* update syncing thread field for receiver only */
146 rx_msg->_syncing_thread = tx_msg->_syncing_thread;
147
148 return 0;
149 }
150
151 return -1;
152 }
153
154 /**
155 * @brief Dispose of received message.
156 *
157 * Releases any memory pool block still associated with the message,
158 * then notifies the sender that message processing is complete.
159 *
160 * @param rx_msg Pointer to receive message descriptor.
161 *
162 * @return N/A
163 */
mbox_message_dispose(struct k_mbox_msg * rx_msg)164 static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
165 {
166 struct k_thread *sending_thread;
167 struct k_mbox_msg *tx_msg;
168
169 /* do nothing if message was disposed of when it was received */
170 if (rx_msg->_syncing_thread == NULL) {
171 return;
172 }
173
174 if (rx_msg->tx_block.data != NULL) {
175 rx_msg->tx_block.data = NULL;
176 }
177
178 /* recover sender info */
179 sending_thread = rx_msg->_syncing_thread;
180 rx_msg->_syncing_thread = NULL;
181 tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
182
183 /* update data size field for sender */
184 tx_msg->size = rx_msg->size;
185
186 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
187 /*
188 * asynchronous send: free asynchronous message descriptor +
189 * dummy thread pair, then give semaphore (if needed)
190 */
191 if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
192 struct k_sem *async_sem = tx_msg->_async_sem;
193
194 mbox_async_free((struct k_mbox_async *)sending_thread);
195 if (async_sem != NULL) {
196 k_sem_give(async_sem);
197 }
198 return;
199 }
200 #endif
201
202 /* synchronous send: wake up sending thread */
203 arch_thread_return_value_set(sending_thread, 0);
204 z_mark_thread_as_not_pending(sending_thread);
205 z_ready_thread(sending_thread);
206 z_reschedule_unlocked();
207 }
208
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time to wait for the message to be received
 *                (although not necessarily completely processed).
 *                Use K_NO_WAIT to return immediately, or K_FOREVER to wait
 *                as long as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			    k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_thread *receiving_thread;
	struct k_mbox_msg *rx_msg;
	k_spinlock_key_t key;

	/* save sender id so it can be used during message matching */
	tx_msg->rx_source_thread = _current;

	/*
	 * finish readying sending thread (actual or dummy) for send;
	 * the tx descriptor travels in the thread's swap_data slot
	 */
	sending_thread = tx_msg->_syncing_thread;
	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver (lock held) */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
		/* each pended receiver stashes its rx descriptor in swap_data */
		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
			z_unpend_thread(receiving_thread);

			/* ready receiver for execution */
			arch_thread_return_value_set(receiving_thread, 0);
			z_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
			/*
			 * asynchronous send: swap out current thread
			 * if receiver has priority, otherwise let it continue
			 *
			 * note: dummy sending thread sits (unqueued)
			 * until the receiver consumes the message
			 */
			if ((sending_thread->base.thread_state & _THREAD_DUMMY)
			    != 0U) {
				z_reschedule(&mbox->lock, key);
				return 0;
			}
#endif
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

			/*
			 * synchronous send: pend current thread (unqueued)
			 * until the receiver consumes the message
			 */
			int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);

			return ret;
		}
	}

	/* didn't find a matching receiver: don't wait for one */
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);

		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		k_spin_unlock(&mbox->lock, key);
		return 0;
	}
#endif
	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

	/* synchronous send: sender waits on tx queue for receiver or timeout */
	int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);

	return ret;
}
307
/*
 * Synchronous mailbox send: the calling thread itself is recorded as the
 * thread to wake once the receiver consumes the message.
 */
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
	       k_timeout_t timeout)
{
	int result;

	/* configure things for a synchronous send, then send the message */
	tx_msg->_syncing_thread = _current;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
	result = mbox_message_put(mbox, tx_msg, timeout);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, result);

	return result;
}
322
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * Asynchronous mailbox send: the caller's descriptor is copied into a
 * pool-allocated descriptor whose dummy thread stands in for the sender,
 * so this function returns without waiting for a receiver.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		      struct k_sem *sem)
{
	struct k_mbox_async *async;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);

	/*
	 * allocate an asynchronous message descriptor, configure both parts,
	 * then send the message asynchronously
	 * (allocation blocks until a descriptor is available)
	 */
	mbox_async_alloc(&async);

	/*
	 * dummy thread takes on the caller's priority — presumably so it is
	 * queued/compared like the real sender would be; NOTE(review): confirm
	 */
	async->thread.prio = _current->base.prio;

	/* copy caller's descriptor; wake-up target is the dummy thread */
	async->tx_msg = *tx_msg;
	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
	async->tx_msg._async_sem = sem;	/* given when message is disposed of */

	/* K_FOREVER here pends the dummy thread, not the caller */
	(void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
}
#endif
347
k_mbox_data_get(struct k_mbox_msg * rx_msg,void * buffer)348 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
349 {
350 /* handle case where data is to be discarded */
351 if (buffer == NULL) {
352 rx_msg->size = 0;
353 mbox_message_dispose(rx_msg);
354 return;
355 }
356
357 /* copy message data to buffer, then dispose of message */
358 if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
359 (void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
360 }
361 mbox_message_dispose(rx_msg);
362 }
363
364 /**
365 * @brief Handle immediate consumption of received mailbox message data.
366 *
367 * Checks to see if received message data should be kept for later retrieval,
368 * or if the data should consumed immediately and the message disposed of.
369 *
370 * The data is consumed immediately in either of the following cases:
371 * 1) The receiver requested immediate retrieval by suppling a buffer
372 * to receive the data.
373 * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
374 *
375 * @param rx_msg Pointer to receive message descriptor.
376 * @param buffer Pointer to buffer to receive data.
377 *
378 * @return 0
379 */
mbox_message_data_check(struct k_mbox_msg * rx_msg,void * buffer)380 static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
381 {
382 if (buffer != NULL) {
383 /* retrieve data now, then dispose of message */
384 k_mbox_data_get(rx_msg, buffer);
385 } else if (rx_msg->size == 0U) {
386 /* there is no data to get, so just dispose of message */
387 mbox_message_dispose(rx_msg);
388 } else {
389 /* keep message around for later data retrieval */
390 }
391
392 return 0;
393 }
394
/*
 * Receive a mailbox message: match against a waiting sender if one exists,
 * otherwise pend on the mailbox's rx queue until one appears or the
 * timeout expires. Data is consumed immediately when buffer is non-NULL
 * or the message carries no data.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	       k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	k_spinlock_key_t key;
	int result;

	/* save receiver id so it can be used during message matching */
	rx_msg->tx_target_thread = _current;

	/* search mailbox's tx queue for a compatible sender (lock held) */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
		/* each pended sender stashes its tx descriptor in swap_data */
		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/*
			 * take sender out of mailbox's tx queue; sender stays
			 * unqueued until the message is disposed of
			 */
			z_unpend_thread(sending_thread);

			k_spin_unlock(&mbox->lock, key);

			/* consume message data immediately, if needed */
			result = mbox_message_data_check(rx_msg, buffer);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
			return result;
		}
	}

	/* didn't find a matching sender */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);

		/* don't wait for a matching sender to appear */
		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

	/*
	 * wait until a matching sender appears or a timeout occurs;
	 * our rx descriptor rides in swap_data so a sender can match it
	 */
	_current->base.swap_data = rx_msg;
	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

	/* consume message data immediately, if needed */
	if (result == 0) {
		result = mbox_message_data_check(rx_msg, buffer);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);

	return result;
}
453