1 /*
2 * Copyright (c) 2017 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <zephyr/kernel.h>
9
10 /* global values and data structures */
/* Message carried through the test FIFOs.  The first word is reserved
 * for the kernel's internal FIFO linkage, so only @msg carries payload.
 */
struct fifo_msg {
	void *private;
	uint32_t msg;
};

#define SIGNAL_RESULT 0x1ee7d00d
#define FIFO_MSG_VALUE 0xdeadbeef
#define MSGQ_MSG_SIZE 4
#define MSGQ_MAX_MSGS 16
#define MSGQ_MSG_VALUE {'a', 'b', 'c', 'd'}
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)

/* verify k_poll() without waiting */
static struct k_sem no_wait_sem;
static struct k_fifo no_wait_fifo;
static struct k_poll_signal no_wait_signal;
static struct k_poll_signal test_signal;
#ifndef CONFIG_USERSPACE
/* In user mode the message queue object is allocated dynamically instead */
static struct k_msgq no_wait_msgq;
#endif
static struct k_sem zero_events_sem;
static struct k_thread test_thread;
static struct k_thread test_loprio_thread;
K_THREAD_STACK_DEFINE(test_stack, STACK_SIZE);
K_THREAD_STACK_DEFINE(test_loprio_stack, STACK_SIZE);
36
37 /**
38 * @brief Test cases to verify poll
39 *
40 * @defgroup kernel_poll_tests Poll tests
41 *
42 * @ingroup all_tests
43 *
44 * @{
45 * @}
46 */
47
48 /**
49 * @brief Test poll events with no wait
50 *
51 * @ingroup kernel_poll_tests
52 *
53 * @see K_POLL_EVENT_INITIALIZER(), k_poll_signal_init(),
54 * k_poll_signal_raise(), k_poll_signal_check()
55 */
ZTEST_USER(poll_api_1cpu,test_poll_no_wait)56 ZTEST_USER(poll_api_1cpu, test_poll_no_wait)
57 {
58 struct fifo_msg msg = { NULL, FIFO_MSG_VALUE }, *msg_ptr;
59 unsigned int signaled;
60 char msgq_recv_buf[MSGQ_MSG_SIZE] = {0};
61 char msgq_msg[MSGQ_MSG_SIZE] = MSGQ_MSG_VALUE;
62 int result;
63 struct k_msgq *mq;
64 #ifdef CONFIG_USERSPACE
65 mq = k_object_alloc(K_OBJ_MSGQ);
66 zassert_not_null(mq, "");
67 #else
68 mq = &no_wait_msgq;
69 #endif
70
71 k_sem_init(&no_wait_sem, 1, 1);
72 k_fifo_init(&no_wait_fifo);
73 k_poll_signal_init(&no_wait_signal);
74
75 k_msgq_alloc_init(mq, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);
76
77 struct k_poll_event events[] = {
78 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
79 K_POLL_MODE_NOTIFY_ONLY,
80 &no_wait_sem),
81 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
82 K_POLL_MODE_NOTIFY_ONLY,
83 &no_wait_fifo),
84 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
85 K_POLL_MODE_NOTIFY_ONLY,
86 &no_wait_signal),
87 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
88 K_POLL_MODE_NOTIFY_ONLY,
89 NULL),
90 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
91 K_POLL_MODE_NOTIFY_ONLY,
92 mq),
93 };
94
95 #ifdef CONFIG_USERSPACE
96 /* Test that k_poll() syscall handler safely handles being
97 * fed garbage
98 *
99 * TODO: Where possible migrate these to the main k_poll()
100 * implementation
101 */
102
103 zassert_equal(k_poll(events, INT_MAX, K_NO_WAIT), -EINVAL);
104 zassert_equal(k_poll(events, 4096, K_NO_WAIT), -ENOMEM);
105
106 /* Allow zero events */
107 zassert_equal(k_poll(events, 0, K_NO_WAIT), -EAGAIN);
108
109 struct k_poll_event bad_events[] = {
110 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
111 K_POLL_NUM_MODES,
112 &no_wait_sem),
113 };
114 zassert_equal(k_poll(bad_events, ARRAY_SIZE(bad_events), K_NO_WAIT),
115 -EINVAL,
116 NULL);
117
118 /* can't use the initializer to misconstruct this */
119 struct k_poll_event bad_events2[] = {
120 { .type = 0xFU,
121 .state = K_POLL_STATE_NOT_READY,
122 .mode = K_POLL_MODE_NOTIFY_ONLY,
123 .obj = &no_wait_sem,
124 },
125 };
126 zassert_equal(k_poll(bad_events2, ARRAY_SIZE(bad_events), K_NO_WAIT),
127 -EINVAL,
128 NULL);
129 #endif /* CONFIG_USERSPACE */
130
131 /* test polling events that are already ready */
132 zassert_false(k_fifo_alloc_put(&no_wait_fifo, &msg));
133 k_poll_signal_raise(&no_wait_signal, SIGNAL_RESULT);
134 zassert_false(k_msgq_put(mq, msgq_msg, K_NO_WAIT));
135
136 zassert_equal(k_poll(events, ARRAY_SIZE(events), K_NO_WAIT), 0, "");
137
138 zassert_equal(events[0].state, K_POLL_STATE_SEM_AVAILABLE, "");
139 zassert_equal(k_sem_take(&no_wait_sem, K_NO_WAIT), 0, "");
140
141 zassert_equal(events[1].state, K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
142 msg_ptr = k_fifo_get(&no_wait_fifo, K_NO_WAIT);
143 zassert_not_null(msg_ptr, "");
144 zassert_equal(msg_ptr, &msg, "");
145 zassert_equal(msg_ptr->msg, FIFO_MSG_VALUE, "");
146
147 zassert_equal(events[2].state, K_POLL_STATE_SIGNALED, "");
148 k_poll_signal_check(&no_wait_signal, &signaled, &result);
149 zassert_not_equal(signaled, 0, "");
150 zassert_equal(result, SIGNAL_RESULT, "");
151
152 zassert_equal(events[3].state, K_POLL_STATE_NOT_READY, "");
153
154 zassert_equal(events[4].state, K_POLL_STATE_MSGQ_DATA_AVAILABLE, "");
155 zassert_false(k_msgq_get(mq, msgq_recv_buf, K_NO_WAIT));
156 zassert_false(memcmp(msgq_msg, msgq_recv_buf, MSGQ_MSG_SIZE), "");
157
158 /* verify events are not ready anymore (user has to clear them first) */
159 events[0].state = K_POLL_STATE_NOT_READY;
160 events[1].state = K_POLL_STATE_NOT_READY;
161 events[2].state = K_POLL_STATE_NOT_READY;
162 events[3].state = K_POLL_STATE_NOT_READY;
163 events[4].state = K_POLL_STATE_NOT_READY;
164 k_poll_signal_reset(&no_wait_signal);
165
166 zassert_equal(k_poll(events, ARRAY_SIZE(events), K_NO_WAIT), -EAGAIN,
167 "");
168 zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
169 zassert_equal(events[1].state, K_POLL_STATE_NOT_READY, "");
170 zassert_equal(events[2].state, K_POLL_STATE_NOT_READY, "");
171 zassert_equal(events[3].state, K_POLL_STATE_NOT_READY, "");
172 zassert_equal(events[4].state, K_POLL_STATE_NOT_READY, "");
173
174 zassert_not_equal(k_sem_take(&no_wait_sem, K_NO_WAIT), 0, "");
175 zassert_is_null(k_fifo_get(&no_wait_fifo, K_NO_WAIT), "");
176 zassert_not_equal(k_msgq_get(mq, msgq_recv_buf, K_NO_WAIT), 0,
177 "");
178 }
179
/* verify k_poll() that has to wait */
static struct k_msgq wait_msgq;
static struct k_msgq *wait_msgq_ptr;

static K_SEM_DEFINE(wait_sem, 0, 1);
static K_FIFO_DEFINE(wait_fifo);
static struct k_poll_signal wait_signal =
	K_POLL_SIGNAL_INITIALIZER(wait_signal);

struct fifo_msg wait_msg = { NULL, FIFO_MSG_VALUE };

/* Tags let check_results() verify which event entry fired */
#define TAG_0 10
#define TAG_1 11
#define TAG_2 12
#define TAG_3 13

/* One entry per object type; entry order is relied upon by
 * check_results() and test_poll_wait().
 */
struct k_poll_event wait_events[] = {
	K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
					K_POLL_MODE_NOTIFY_ONLY,
					&wait_sem, TAG_0),
	K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					K_POLL_MODE_NOTIFY_ONLY,
					&wait_fifo, TAG_1),
	K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
					K_POLL_MODE_NOTIFY_ONLY,
					&wait_signal, TAG_2),
	K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
				 K_POLL_MODE_NOTIFY_ONLY,
				 NULL),
	K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
					K_POLL_MODE_NOTIFY_ONLY,
					&wait_msgq, TAG_3),
};

/* bit flags selecting which queue-like objects poll_wait_helper() feeds */
#define USE_FIFO (1 << 0)
#define USE_MSGQ (1 << 1)
216
poll_wait_helper(void * use_queuelike,void * msgq,void * p3)217 static void poll_wait_helper(void *use_queuelike, void *msgq, void *p3)
218 {
219 (void)p3;
220
221 k_sleep(K_MSEC(250));
222
223 k_sem_give(&wait_sem);
224
225 uintptr_t flags = (uintptr_t)use_queuelike;
226
227 if (flags & USE_FIFO) {
228 k_fifo_alloc_put(&wait_fifo, &wait_msg);
229 }
230
231 k_poll_signal_raise(&wait_signal, SIGNAL_RESULT);
232
233 if (flags & USE_MSGQ) {
234 char m[] = MSGQ_MSG_VALUE;
235
236 k_msgq_put(msgq, &m[0], K_FOREVER);
237 }
238 }
239
240 /* check results for multiple events */
/**
 * Validate one wait_events[] entry after a k_poll() call.
 *
 * @param events       Pointer to the event entry under test.
 * @param event_type   Expected K_POLL_TYPE_* of that entry.
 * @param is_available true if the event was expected to have fired.
 *
 * When the event fired, the ready condition is consumed (semaphore
 * taken, fifo/msgq drained, signal cleared) and the entry is reset to
 * K_POLL_STATE_NOT_READY so the array can be polled again.
 */
void check_results(struct k_poll_event *events, uint32_t event_type,
		   bool is_available)
{
	struct fifo_msg *msg_ptr;
	char msgq_recv_buf[MSGQ_MSG_SIZE] = {0};
	char msg[] = MSGQ_MSG_VALUE;

	switch (event_type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (is_available) {
			zassert_equal(events->state, K_POLL_STATE_SEM_AVAILABLE,
				      "");
			zassert_equal(k_sem_take(&wait_sem, K_NO_WAIT), 0, "");
			zassert_equal(events->tag, TAG_0, "");
			/* reset to not ready */
			events->state = K_POLL_STATE_NOT_READY;
		} else {
			zassert_equal(events->state, K_POLL_STATE_NOT_READY,
				      "");
			zassert_equal(k_sem_take(&wait_sem, K_NO_WAIT), -EBUSY,
				      "");
			zassert_equal(events->tag, TAG_0, "");
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (is_available) {
			zassert_equal(events->state,
				      K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
			msg_ptr = k_fifo_get(&wait_fifo, K_NO_WAIT);
			zassert_not_null(msg_ptr, "");
			zassert_equal(msg_ptr, &wait_msg, "");
			zassert_equal(msg_ptr->msg, FIFO_MSG_VALUE, "");
			zassert_equal(events->tag, TAG_1, "");
			/* reset to not ready */
			events->state = K_POLL_STATE_NOT_READY;
		} else {
			zassert_equal(events->state, K_POLL_STATE_NOT_READY,
				      "");
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (is_available) {
			/* consistency fix: check the passed-in entry rather
			 * than indexing the global wait_events[] directly,
			 * as every other case does
			 */
			zassert_equal(events->state,
				      K_POLL_STATE_SIGNALED, "");
			zassert_equal(wait_signal.signaled, 1, "");
			zassert_equal(wait_signal.result, SIGNAL_RESULT, "");
			zassert_equal(events->tag, TAG_2, "");
			/* reset to not ready */
			events->state = K_POLL_STATE_NOT_READY;
			wait_signal.signaled = 0U;
		} else {
			zassert_equal(events->state, K_POLL_STATE_NOT_READY,
				      "");
		}
		break;
	case K_POLL_TYPE_IGNORE:
		/* same consistency fix as the SIGNAL case above */
		zassert_equal(events->state, K_POLL_STATE_NOT_READY, "");
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (is_available) {
			zassert_equal(events->state,
				      K_POLL_STATE_MSGQ_DATA_AVAILABLE, "");

			zassert_false(k_msgq_get(wait_msgq_ptr, msgq_recv_buf,
						 K_NO_WAIT), "");
			zassert_false(memcmp(msg, msgq_recv_buf,
					     MSGQ_MSG_SIZE), "");
			zassert_equal(events->tag, TAG_3, "");
			/* reset to not ready */
			events->state = K_POLL_STATE_NOT_READY;
		} else {
			zassert_equal(events->state, K_POLL_STATE_NOT_READY,
				      "");
		}
		break;

	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event_type);
		break;
	}
}
322
323 /**
324 * @brief Test polling with wait
325 *
326 * @ingroup kernel_poll_tests
327 *
328 * @details
329 * Test Objective:
330 * - Test the poll operation which enables waiting concurrently
331 * for one/two/all conditions to be fulfilled.
332 * - set a single timeout argument indicating
333 * the maximum amount of time a thread shall wait.
334 *
335 * Testing techniques:
 * - function and black box testing.
337 * - Interface testing.
338 * - Dynamic analysis and testing.
339 *
340 * Prerequisite Conditions:
341 * - CONFIG_TEST_USERSPACE
342 * - CONFIG_DYNAMIC_OBJECTS
343 * - CONFIG_POLL
344 *
345 * Input Specifications:
346 * - N/A
347 *
348 * Test Procedure:
349 * -# Use FIFO/semaphore/signal/message queue object to define poll event.
350 * -# Initialize the FIFO/semaphore/signal/message queue object.
351 * -# Create a thread to put FIFO,
352 * give semaphore, raise signal, and put message queue.
353 * -# Check the result when signal is raised,
354 * semaphore is given, fifo is filled, and message is received.
355 * -# Check the result when no event is satisfied.
356 * -# Check the result when only semaphore is given.
357 * -# Check the result when only FIFO is filled.
358 * -# Check the result when only signal is raised.
359 * -# Check the result when only message is received.
360 *
361 * Expected Test Result:
362 * - FIFO/semaphore/signal/message queue events available/waitable in poll.
363 *
364 * Pass/Fail Criteria:
365 * - Successful if check points in test procedure are all passed, otherwise failure.
366 *
367 * Assumptions and Constraints:
368 * - N/A
369 *
370 * @see k_poll_signal_init(), k_poll()
371 */
ZTEST(poll_api_1cpu, test_poll_wait)
{
	const int main_low_prio = 10;

#ifdef CONFIG_USERSPACE
	/* user mode: allocate the message queue dynamically and re-point
	 * wait_events[4] at it (its static initializer references the
	 * static wait_msgq, which user threads cannot be granted here)
	 */
	wait_msgq_ptr = k_object_alloc(K_OBJ_MSGQ);
	k_msgq_alloc_init(wait_msgq_ptr, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);

	k_poll_event_init(&wait_events[4],
			  K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY,
			  wait_msgq_ptr);
	/* k_poll_event_init() does not set a tag; restore it */
	wait_events[4].tag = TAG_3;
#else
	wait_msgq_ptr = &wait_msgq;
	k_msgq_alloc_init(wait_msgq_ptr, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);
#endif
	int rc;

	int old_prio = k_thread_priority_get(k_current_get());

	k_poll_signal_init(&wait_signal);
	/*
	 * Wait for 4 non-ready events to become ready from a higher priority
	 * thread.
	 */
	k_thread_priority_set(k_current_get(), main_low_prio);

	k_tid_t tid1 = k_thread_create(&test_thread, test_stack,
			K_THREAD_STACK_SIZEOF(test_stack),
			poll_wait_helper, (void *)(USE_FIFO | USE_MSGQ), wait_msgq_ptr, 0,
			main_low_prio - 1, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	/* nothing is ready yet, so K_NO_WAIT must fail immediately */
	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_NO_WAIT);
	zassert_equal(rc, -EAGAIN, "should return EAGAIN with K_NO_WAIT");

	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

	k_thread_priority_set(k_current_get(), old_prio);

	zassert_equal(rc, 0, "");
	/* all events should be available. */

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, true);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
	check_results(&wait_events[3], K_POLL_TYPE_IGNORE, true);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, true);

	/* verify events are not ready anymore */
	zassert_equal(k_poll(wait_events, ARRAY_SIZE(wait_events),
			     K_SECONDS(1)), -EAGAIN, "");
	/* all events should not be available. */

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
	check_results(&wait_events[3], K_POLL_TYPE_IGNORE, false);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

	/*
	 * Wait for 2 out of 4 non-ready events to become ready from a higher
	 * priority thread.
	 */
	k_thread_priority_set(k_current_get(), main_low_prio);

	/* helper spawned without USE_FIFO/USE_MSGQ: only sem and signal
	 * will be made ready this round (tid1 has finished by now, so the
	 * test_thread object can be reused)
	 */
	k_tid_t tid2 = k_thread_create(&test_thread, test_stack,
			K_THREAD_STACK_SIZEOF(test_stack),
			poll_wait_helper,
			0, 0, 0, main_low_prio - 1, 0, K_NO_WAIT);

	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

	k_thread_priority_set(k_current_get(), old_prio);

	zassert_equal(rc, 0, "");

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

	/*
	 * Wait for each event to be ready from a lower priority thread, one at
	 * a time.
	 */
	k_tid_t tid3 = k_thread_create(&test_thread, test_stack,
			K_THREAD_STACK_SIZEOF(test_stack),
			poll_wait_helper,
			(void *)(USE_FIFO | USE_MSGQ), wait_msgq_ptr, 0, old_prio + 1,
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	/* semaphore */
	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));
	zassert_equal(rc, 0, "");

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

	/* fifo */
	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

	zassert_equal(rc, 0, "");

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, true);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

	/* poll signal */
	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

	zassert_equal(rc, 0, "");

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

	/* message queue */
	rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

	zassert_equal(rc, 0, "");

	check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
	check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
	check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
	check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, true);

	/* all three tids alias &test_thread; aborting an already-dead
	 * thread is a no-op, so this is safe teardown
	 */
	k_thread_abort(tid1);
	k_thread_abort(tid2);
	k_thread_abort(tid3);
}
507
/* verify k_poll() that waits on object which gets cancellation */

/* cancel_fifo is cancelled mid-wait; non_cancel_fifo receives real data */
static struct k_fifo cancel_fifo;
static struct k_fifo non_cancel_fifo;
512
poll_cancel_helper(void * p1,void * p2,void * p3)513 static void poll_cancel_helper(void *p1, void *p2, void *p3)
514 {
515 (void)p1; (void)p2; (void)p3;
516
517 static struct fifo_msg msg;
518
519 k_sleep(K_MSEC(100));
520
521 k_fifo_cancel_wait(&cancel_fifo);
522
523 k_fifo_alloc_put(&non_cancel_fifo, &msg);
524 }
525
526 /**
527 * @brief Test polling of cancelled fifo
528 *
529 * @details Test the FIFO(queue) data available/cancelable events
530 * as events in poll.
531 *
532 * @ingroup kernel_poll_tests
533 *
534 * @see k_poll(), k_fifo_cancel_wait(), k_fifo_alloc_put
535 */
/* Shared body for the two cancellation tests: poll one fifo that gets
 * cancelled and one that receives data, at a priority either below or
 * above the helper thread depending on @is_main_low_prio.
 */
void test_poll_cancel(bool is_main_low_prio)
{
	const int main_low_prio = 10;
	int saved_prio = k_thread_priority_get(k_current_get());
	int ret;

	struct k_poll_event cancel_events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &cancel_fifo),
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &non_cancel_fifo),
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 NULL),
	};

	k_fifo_init(&cancel_fifo);
	k_fifo_init(&non_cancel_fifo);

	if (is_main_low_prio) {
		k_thread_priority_set(k_current_get(), main_low_prio);
	}

	k_tid_t tid = k_thread_create(&test_thread, test_stack,
				      K_THREAD_STACK_SIZEOF(test_stack),
				      poll_cancel_helper, (void *)1, 0, 0,
				      main_low_prio - 1,
				      K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	ret = k_poll(cancel_events, ARRAY_SIZE(cancel_events), K_SECONDS(1));

	k_thread_priority_set(k_current_get(), saved_prio);

	/* a cancelled wait surfaces as -EINTR from k_poll() */
	zassert_equal(ret, -EINTR, "");
	zassert_equal(cancel_events[0].state, K_POLL_STATE_CANCELLED, "");

	if (is_main_low_prio) {
		/* If poller thread is lower priority than threads which
		 * generate poll events, it may get multiple poll events
		 * at once.
		 */
		zassert_equal(cancel_events[1].state,
			      K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
	} else {
		/* Otherwise, poller thread will be woken up on first
		 * event triggered.
		 */
		zassert_equal(cancel_events[1].state,
			      K_POLL_STATE_NOT_READY, "");
	}

	k_thread_abort(tid);
}
593
/* Poller below the helper's priority: may observe both the cancellation
 * and the data event in one k_poll() call.
 */
ZTEST(poll_api_1cpu, test_poll_cancel_main_low_prio)
{
	test_poll_cancel(true);
}

/* Poller above the helper's priority: woken on the first event only. */
ZTEST(poll_api_1cpu, test_poll_cancel_main_high_prio)
{
	test_poll_cancel(false);
}

/* verify multiple pollers */
static K_SEM_DEFINE(multi_sem, 0, 1);
606
static void multi_lowprio(void *p1, void *p2, void *p3)
{
	(void)p1; (void)p2; (void)p3;

	struct k_poll_event event;
	int rc;

	/* second poller pending on the same semaphore, to exercise the
	 * multiple-waiters path in the poll implementation
	 */
	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &multi_sem);

	(void)k_poll(&event, 1, K_FOREVER);
	rc = k_sem_take(&multi_sem, K_FOREVER);
	zassert_equal(rc, 0, "");
}
621
622 static K_SEM_DEFINE(multi_reply, 0, 1);
623
multi(void * p1,void * p2,void * p3)624 static void multi(void *p1, void *p2, void *p3)
625 {
626 (void)p1; (void)p2; (void)p3;
627
628 struct k_poll_event event;
629
630 k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
631 K_POLL_MODE_NOTIFY_ONLY, &multi_sem);
632
633 (void)k_poll(&event, 1, K_FOREVER);
634 k_sem_take(&multi_sem, K_FOREVER);
635 k_sem_give(&multi_reply);
636 }
637
638 static K_SEM_DEFINE(multi_ready_sem, 1, 1);
639
640 /**
641 * @brief Test polling of multiple events
642 *
643 * @details
644 * - Test the multiple semaphore events as waitable events in poll.
645 *
646 * @ingroup kernel_poll_tests
647 *
648 * @see K_POLL_EVENT_INITIALIZER(), k_poll(), k_poll_event_init()
649 */
ZTEST(poll_api, test_poll_multi)
{
	int old_prio = k_thread_priority_get(k_current_get());
	const int main_low_prio = 10;
	int rc;

	struct k_poll_event events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &multi_sem),
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &multi_ready_sem),
	};

	k_thread_priority_set(k_current_get(), main_low_prio);

	k_tid_t tid1 = k_thread_create(&test_thread, test_stack,
				       K_THREAD_STACK_SIZEOF(test_stack),
				       multi, 0, 0, 0, main_low_prio - 1,
				       K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/*
	 * create additional thread to add multiple(more than one)
	 * pending threads in events list to improve code coverage.
	 */
	k_tid_t tid2 = k_thread_create(&test_loprio_thread, test_loprio_stack,
				       K_THREAD_STACK_SIZEOF(test_loprio_stack),
				       multi_lowprio, 0, 0, 0, main_low_prio + 1,
				       K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* Allow lower priority thread to add poll event in the list */
	k_sleep(K_MSEC(250));
	rc = k_poll(events, ARRAY_SIZE(events), K_SECONDS(1));

	/* only multi_ready_sem (initialized available) should be ready;
	 * multi_sem is still held by nobody and has count 0
	 */
	zassert_equal(rc, 0, "");
	zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
	zassert_equal(events[1].state, K_POLL_STATE_SEM_AVAILABLE, "");

	/*
	 * free polling threads, ensuring it awoken from k_poll()
	 * and got the sem
	 */
	k_sem_give(&multi_sem);
	k_sem_give(&multi_sem);
	rc = k_sem_take(&multi_reply, K_SECONDS(1));

	zassert_equal(rc, 0, "");

	/* wait for polling threads to complete execution */
	k_thread_priority_set(k_current_get(), old_prio);
	k_sleep(K_MSEC(250));

	k_thread_abort(tid1);
	k_thread_abort(tid2);
}
706
707 static struct k_poll_signal signal;
708
threadstate(void * p1,void * p2,void * p3)709 static void threadstate(void *p1, void *p2, void *p3)
710 {
711 (void)p2; (void)p3;
712
713 k_sleep(K_MSEC(250));
714 /* Update polling thread state explicitly to improve code coverage */
715 k_thread_suspend(p1);
716 /* Enable polling thread by signalling */
717 k_poll_signal_raise(&signal, SIGNAL_RESULT);
718 k_thread_resume(p1);
719 }
720
721 /**
722 * @brief Test polling of events by manipulating polling thread state
723 *
724 * @details
725 * - manipulating thread state to consider case where no polling thread
726 * is available during event signalling.
727 * - defined a signal poll as waitable events in poll and
728 * verify the result after signal raised
729 *
730 * @ingroup kernel_poll_tests
731 *
732 * @see K_POLL_EVENT_INITIALIZER(), k_poll(), k_poll_signal_init(),
733 * k_poll_signal_check(), k_poll_signal_raise()
734 */
ZTEST(poll_api_1cpu, test_poll_threadstate)
{
	unsigned int signaled;
	const int main_low_prio = 10;
	int result;

	k_poll_signal_init(&signal);

	struct k_poll_event event;

	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
			  K_POLL_MODE_NOTIFY_ONLY, &signal);

	int old_prio = k_thread_priority_get(k_current_get());

	/* drop priority so the helper preempts us while we poll */
	k_thread_priority_set(k_current_get(), main_low_prio);
	k_tid_t ztest_tid = k_current_get();

	/* helper gets our tid so it can suspend/resume this thread */
	k_tid_t tid = k_thread_create(&test_thread, test_stack,
				      K_THREAD_STACK_SIZEOF(test_stack), threadstate,
				      ztest_tid, 0, 0, main_low_prio - 1, K_INHERIT_PERMS,
				      K_NO_WAIT);

	/* wait for spawn thread to take action */
	zassert_equal(k_poll(&event, 1, K_SECONDS(1)), 0, "");
	zassert_equal(event.state, K_POLL_STATE_SIGNALED, "");
	k_poll_signal_check(&signal, &signaled, &result);
	zassert_not_equal(signaled, 0, "");
	zassert_equal(result, SIGNAL_RESULT, "");

	event.state = K_POLL_STATE_NOT_READY;
	k_poll_signal_reset(&signal);
	/* teardown */
	k_thread_priority_set(k_current_get(), old_prio);

	k_thread_abort(tid);
}
772
/* Grant the user-mode test thread access to every kernel object the
 * ZTEST_USER cases touch; called from the suite setup.
 */
void poll_test_grant_access(void)
{
	k_thread_access_grant(k_current_get(), &no_wait_sem, &no_wait_fifo,
			      &no_wait_signal, &wait_sem, &wait_fifo,
			      &cancel_fifo, &non_cancel_fifo,
			      &wait_signal, &test_thread, &test_signal,
			      &test_stack, &multi_sem, &multi_reply);
}
781
ZTEST(poll_api_1cpu,test_poll_zero_events)782 ZTEST(poll_api_1cpu, test_poll_zero_events)
783 {
784 struct k_poll_event event;
785
786 k_sem_init(&zero_events_sem, 1, 1);
787
788 k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
789 K_POLL_MODE_NOTIFY_ONLY, &zero_events_sem);
790
791 zassert_equal(k_poll(&event, 0, K_MSEC(50)), -EAGAIN);
792 }
793