/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ztest.h>
#include <kernel.h>

/* global values and data structures */
struct fifo_msg {
        void *private;
        uint32_t msg;
};

#define SIGNAL_RESULT 0x1ee7d00d
#define FIFO_MSG_VALUE 0xdeadbeef
#define MSGQ_MSG_SIZE 4
#define MSGQ_MAX_MSGS 16
#define MSGQ_MSG_VALUE {'a', 'b', 'c', 'd'}
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACKSIZE)

/* verify k_poll() without waiting */
static struct k_sem no_wait_sem;
static struct k_fifo no_wait_fifo;
static struct k_poll_signal no_wait_signal;
static struct k_poll_signal test_signal;
#ifndef CONFIG_USERSPACE
static struct k_msgq no_wait_msgq;
#endif
static struct k_sem zero_events_sem;
static struct k_thread test_thread;
static struct k_thread test_loprio_thread;
K_THREAD_STACK_DEFINE(test_stack, STACK_SIZE);
K_THREAD_STACK_DEFINE(test_loprio_stack, STACK_SIZE);

/**
 * @brief Test cases to verify poll
 *
 * @defgroup kernel_poll_tests Poll tests
 *
 * @ingroup all_tests
 *
 * @{
 * @}
 */

/**
 * @brief Test poll events with no wait
 *
 * @ingroup kernel_poll_tests
 *
 * @see K_POLL_EVENT_INITIALIZER(), k_poll_signal_init(),
 * k_poll_signal_raise(), k_poll_signal_check()
 */
void test_poll_no_wait(void)
{
        struct fifo_msg msg = { NULL, FIFO_MSG_VALUE }, *msg_ptr;
        unsigned int signaled;
        char msgq_recv_buf[MSGQ_MSG_SIZE] = {0};
        char msgq_msg[MSGQ_MSG_SIZE] = MSGQ_MSG_VALUE;
        int result;
        struct k_msgq *mq;
#ifdef CONFIG_USERSPACE
        mq = k_object_alloc(K_OBJ_MSGQ);
        zassert_not_null(mq, "");
#else
        mq = &no_wait_msgq;
#endif

        k_sem_init(&no_wait_sem, 1, 1);
        k_fifo_init(&no_wait_fifo);
        k_poll_signal_init(&no_wait_signal);

        k_msgq_alloc_init(mq, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);

        struct k_poll_event events[] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &no_wait_sem),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &no_wait_fifo),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &no_wait_signal),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         NULL),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         mq),
        };
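
        /*
         * Note: the array above covers every pollable condition exercised by
         * this test: semaphore, FIFO, poll signal, a K_POLL_TYPE_IGNORE
         * placeholder (never signalled, expected to stay in
         * K_POLL_STATE_NOT_READY), and a message queue.
         */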

#ifdef CONFIG_USERSPACE
        /* Test that k_poll() syscall handler safely handles being
         * fed garbage
         *
         * TODO: Where possible migrate these to the main k_poll()
         * implementation
         */

        zassert_equal(k_poll(events, INT_MAX, K_NO_WAIT), -EINVAL, NULL);
        zassert_equal(k_poll(events, 4096, K_NO_WAIT), -ENOMEM, NULL);
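
        /*
         * Rationale (assumed from the expected return values above): an event
         * count as large as INT_MAX should be rejected outright with -EINVAL,
         * while a merely huge count (4096) should fail with -ENOMEM once the
         * syscall handler cannot allocate its kernel-side copy of the events
         * array.
         */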

        /* Allow zero events */
        zassert_equal(k_poll(events, 0, K_NO_WAIT), -EAGAIN, NULL);

        struct k_poll_event bad_events[] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                         K_POLL_NUM_MODES,
                                         &no_wait_sem),
        };
        zassert_equal(k_poll(bad_events, ARRAY_SIZE(bad_events), K_NO_WAIT),
                      -EINVAL,
                      NULL);

        struct k_poll_event bad_events2[] = {
                K_POLL_EVENT_INITIALIZER(0xFU,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &no_wait_sem),
        };
        zassert_equal(k_poll(bad_events2, ARRAY_SIZE(bad_events2), K_NO_WAIT),
                      -EINVAL,
                      NULL);
#endif /* CONFIG_USERSPACE */

        /* test polling events that are already ready */
        zassert_false(k_fifo_alloc_put(&no_wait_fifo, &msg), NULL);
        k_poll_signal_raise(&no_wait_signal, SIGNAL_RESULT);
        zassert_false(k_msgq_put(mq, msgq_msg, K_NO_WAIT), NULL);

        zassert_equal(k_poll(events, ARRAY_SIZE(events), K_NO_WAIT), 0, "");

        zassert_equal(events[0].state, K_POLL_STATE_SEM_AVAILABLE, "");
        zassert_equal(k_sem_take(&no_wait_sem, K_NO_WAIT), 0, "");

        zassert_equal(events[1].state, K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
        msg_ptr = k_fifo_get(&no_wait_fifo, K_NO_WAIT);
        zassert_not_null(msg_ptr, "");
        zassert_equal(msg_ptr, &msg, "");
        zassert_equal(msg_ptr->msg, FIFO_MSG_VALUE, "");

        zassert_equal(events[2].state, K_POLL_STATE_SIGNALED, "");
        k_poll_signal_check(&no_wait_signal, &signaled, &result);
        zassert_not_equal(signaled, 0, "");
        zassert_equal(result, SIGNAL_RESULT, "");

        zassert_equal(events[3].state, K_POLL_STATE_NOT_READY, "");

        zassert_equal(events[4].state, K_POLL_STATE_MSGQ_DATA_AVAILABLE, "");
        zassert_false(k_msgq_get(mq, msgq_recv_buf, K_NO_WAIT), NULL);
        zassert_false(memcmp(msgq_msg, msgq_recv_buf, MSGQ_MSG_SIZE), "");

        /* verify events are not ready anymore (user has to clear them first) */
        events[0].state = K_POLL_STATE_NOT_READY;
        events[1].state = K_POLL_STATE_NOT_READY;
        events[2].state = K_POLL_STATE_NOT_READY;
        events[3].state = K_POLL_STATE_NOT_READY;
        events[4].state = K_POLL_STATE_NOT_READY;
        k_poll_signal_reset(&no_wait_signal);

        zassert_equal(k_poll(events, ARRAY_SIZE(events), K_NO_WAIT), -EAGAIN,
                      "");
        zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
        zassert_equal(events[1].state, K_POLL_STATE_NOT_READY, "");
        zassert_equal(events[2].state, K_POLL_STATE_NOT_READY, "");
        zassert_equal(events[3].state, K_POLL_STATE_NOT_READY, "");
        zassert_equal(events[4].state, K_POLL_STATE_NOT_READY, "");

        zassert_not_equal(k_sem_take(&no_wait_sem, K_NO_WAIT), 0, "");
        zassert_is_null(k_fifo_get(&no_wait_fifo, K_NO_WAIT), "");
        zassert_not_equal(k_msgq_get(mq, msgq_recv_buf, K_NO_WAIT), 0,
                          "");
}

/* verify k_poll() that has to wait */
static struct k_msgq wait_msgq;
static struct k_msgq *wait_msgq_ptr;

static K_SEM_DEFINE(wait_sem, 0, 1);
static K_FIFO_DEFINE(wait_fifo);
static struct k_poll_signal wait_signal =
        K_POLL_SIGNAL_INITIALIZER(wait_signal);

struct fifo_msg wait_msg = { NULL, FIFO_MSG_VALUE };

#define TAG_0 10
#define TAG_1 11
#define TAG_2 12
#define TAG_3 13
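
/*
 * Arbitrary tag values; check_results() verifies that k_poll() leaves the
 * tag of each event untouched while updating its state.
 */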

struct k_poll_event wait_events[] = {
        K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                        K_POLL_MODE_NOTIFY_ONLY,
                                        &wait_sem, TAG_0),
        K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                                        K_POLL_MODE_NOTIFY_ONLY,
                                        &wait_fifo, TAG_1),
        K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                        K_POLL_MODE_NOTIFY_ONLY,
                                        &wait_signal, TAG_2),
        K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
                                 K_POLL_MODE_NOTIFY_ONLY,
                                 NULL),
        K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
                                        K_POLL_MODE_NOTIFY_ONLY,
                                        &wait_msgq, TAG_3),
};

#define USE_FIFO (1 << 0)
#define USE_MSGQ (1 << 1)
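
/*
 * Bit flags packed into the first (void *) argument of poll_wait_helper()
 * below; they select whether the helper also feeds the FIFO and/or the
 * message queue in addition to giving the semaphore and raising the signal.
 */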

static void poll_wait_helper(void *use_queuelike, void *msgq, void *p3)
{
        (void)p3;

        k_sleep(K_MSEC(250));

        k_sem_give(&wait_sem);

        uintptr_t flags = (uintptr_t)use_queuelike;

        if (flags & USE_FIFO) {
                k_fifo_alloc_put(&wait_fifo, &wait_msg);
        }

        k_poll_signal_raise(&wait_signal, SIGNAL_RESULT);

        if (flags & USE_MSGQ) {
                char m[] = MSGQ_MSG_VALUE;

                k_msgq_put(msgq, &m[0], K_FOREVER);
        }
}

/* check results for multiple events */
void check_results(struct k_poll_event *events, uint32_t event_type,
                   bool is_available)
{
        struct fifo_msg *msg_ptr;
        char msgq_recv_buf[MSGQ_MSG_SIZE] = {0};
        char msg[] = MSGQ_MSG_VALUE;

        switch (event_type) {
        case K_POLL_TYPE_SEM_AVAILABLE:
                if (is_available) {
                        zassert_equal(events->state, K_POLL_STATE_SEM_AVAILABLE,
                                      "");
                        zassert_equal(k_sem_take(&wait_sem, K_NO_WAIT), 0, "");
                        zassert_equal(events->tag, TAG_0, "");
                        /* reset to not ready */
                        events->state = K_POLL_STATE_NOT_READY;
                } else {
                        zassert_equal(events->state, K_POLL_STATE_NOT_READY,
                                      "");
                        zassert_equal(k_sem_take(&wait_sem, K_NO_WAIT), -EBUSY,
                                      "");
                        zassert_equal(events->tag, TAG_0, "");
                }
                break;
        case K_POLL_TYPE_DATA_AVAILABLE:
                if (is_available) {
                        zassert_equal(events->state,
                                      K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
                        msg_ptr = k_fifo_get(&wait_fifo, K_NO_WAIT);
                        zassert_not_null(msg_ptr, "");
                        zassert_equal(msg_ptr, &wait_msg, "");
                        zassert_equal(msg_ptr->msg, FIFO_MSG_VALUE, "");
                        zassert_equal(events->tag, TAG_1, "");
                        /* reset to not ready */
                        events->state = K_POLL_STATE_NOT_READY;
                } else {
                        zassert_equal(events->state, K_POLL_STATE_NOT_READY,
                                      "");
                }
                break;
        case K_POLL_TYPE_SIGNAL:
                if (is_available) {
                        zassert_equal(wait_events[2].state,
                                      K_POLL_STATE_SIGNALED, "");
                        zassert_equal(wait_signal.signaled, 1, "");
                        zassert_equal(wait_signal.result, SIGNAL_RESULT, "");
                        zassert_equal(wait_events[2].tag, TAG_2, "");
                        /* reset to not ready */
                        events->state = K_POLL_STATE_NOT_READY;
                        wait_signal.signaled = 0U;
                } else {
                        zassert_equal(events->state, K_POLL_STATE_NOT_READY,
                                      "");
                }
                break;
        case K_POLL_TYPE_IGNORE:
                zassert_equal(wait_events[3].state, K_POLL_STATE_NOT_READY, "");
                break;
        case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
                if (is_available) {
                        zassert_equal(events->state,
                                      K_POLL_STATE_MSGQ_DATA_AVAILABLE, "");

                        zassert_false(k_msgq_get(wait_msgq_ptr, msgq_recv_buf,
                                                 K_NO_WAIT), "");
                        zassert_false(memcmp(msg, msgq_recv_buf,
                                             MSGQ_MSG_SIZE), "");
                        zassert_equal(events->tag, TAG_3, "");
                        /* reset to not ready */
                        events->state = K_POLL_STATE_NOT_READY;
                } else {
                        zassert_equal(events->state, K_POLL_STATE_NOT_READY,
                                      "");
                }
                break;

        default:
                __ASSERT(false, "invalid event type (0x%x)\n", event_type);
                break;
        }
}

/**
 * @brief Test polling with wait
 *
 * @ingroup kernel_poll_tests
 *
 * @details
 * Test Objective:
 * - Test the poll operation which enables waiting concurrently
 *   for one/two/all conditions to be fulfilled.
 * - Set a single timeout argument indicating
 *   the maximum amount of time a thread shall wait.
 *
 * Testing techniques:
 * - Function and black box testing.
 * - Interface testing.
 * - Dynamic analysis and testing.
 *
 * Prerequisite Conditions:
 * - CONFIG_TEST_USERSPACE
 * - CONFIG_DYNAMIC_OBJECTS
 * - CONFIG_POLL
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Use FIFO/semaphore/signal/message queue objects to define poll events.
 * -# Initialize the FIFO/semaphore/signal/message queue objects.
 * -# Create a thread that puts data into the FIFO, gives the semaphore,
 *    raises the signal, and puts a message into the message queue.
 * -# Check the results when the signal is raised, the semaphore is given,
 *    the FIFO is filled, and a message is received.
 * -# Check the results when no event is satisfied.
 * -# Check the results when only the semaphore is given.
 * -# Check the results when only the FIFO is filled.
 * -# Check the results when only the signal is raised.
 * -# Check the results when only a message is received.
 *
 * Expected Test Result:
 * - FIFO/semaphore/signal/message queue events are available/waitable in poll.
 *
 * Pass/Fail Criteria:
 * - Successful if check points in test procedure are all passed, otherwise failure.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see k_poll_signal_init(), k_poll()
 */
void test_poll_wait(void)
{
        const int main_low_prio = 10;

#ifdef CONFIG_USERSPACE
        wait_msgq_ptr = k_object_alloc(K_OBJ_MSGQ);
        k_msgq_alloc_init(wait_msgq_ptr, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);

        k_poll_event_init(&wait_events[4],
                          K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY,
                          wait_msgq_ptr);
        wait_events[4].tag = TAG_3;
#else
        wait_msgq_ptr = &wait_msgq;
        k_msgq_alloc_init(wait_msgq_ptr, MSGQ_MSG_SIZE, MSGQ_MAX_MSGS);
#endif
        int rc;

        int old_prio = k_thread_priority_get(k_current_get());

        k_poll_signal_init(&wait_signal);
        /*
         * Wait for 4 non-ready events to become ready from a higher priority
         * thread.
         */
        k_thread_priority_set(k_current_get(), main_low_prio);

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack),
                        poll_wait_helper, (void *)(USE_FIFO | USE_MSGQ), wait_msgq_ptr, 0,
                        main_low_prio - 1, K_USER | K_INHERIT_PERMS,
                        K_NO_WAIT);

        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_NO_WAIT);
        zassert_equal(rc, -EAGAIN, "should return EAGAIN with K_NO_WAIT");

        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

        k_thread_priority_set(k_current_get(), old_prio);

        zassert_equal(rc, 0, "");
        /* all events should be available. */

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, true);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
        check_results(&wait_events[3], K_POLL_TYPE_IGNORE, true);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, true);

        /* verify events are not ready anymore */
        zassert_equal(k_poll(wait_events, ARRAY_SIZE(wait_events),
                             K_SECONDS(1)), -EAGAIN, "");
        /* all events should not be available. */

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
        check_results(&wait_events[3], K_POLL_TYPE_IGNORE, false);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

        /*
         * Wait for 2 out of 4 non-ready events to become ready from a higher
         * priority thread.
         */
        k_thread_priority_set(k_current_get(), main_low_prio);

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack),
                        poll_wait_helper,
                        0, 0, 0, main_low_prio - 1, 0, K_NO_WAIT);

        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

        k_thread_priority_set(k_current_get(), old_prio);

        zassert_equal(rc, 0, "");

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

        /*
         * Wait for each event to be ready from a lower priority thread, one at
         * a time.
         */
        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack),
                        poll_wait_helper,
                        (void *)(USE_FIFO | USE_MSGQ), wait_msgq_ptr, 0, old_prio + 1,
                        K_USER | K_INHERIT_PERMS, K_NO_WAIT);
        /* semaphore */
        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));
        zassert_equal(rc, 0, "");

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, true);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

        /* fifo */
        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

        zassert_equal(rc, 0, "");

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, true);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

        /* poll signal */
        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

        zassert_equal(rc, 0, "");

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, true);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, false);

        /* message queue */
        rc = k_poll(wait_events, ARRAY_SIZE(wait_events), K_SECONDS(1));

        zassert_equal(rc, 0, "");

        check_results(&wait_events[0], K_POLL_TYPE_SEM_AVAILABLE, false);
        check_results(&wait_events[1], K_POLL_TYPE_DATA_AVAILABLE, false);
        check_results(&wait_events[2], K_POLL_TYPE_SIGNAL, false);
        check_results(&wait_events[4], K_POLL_TYPE_MSGQ_DATA_AVAILABLE, true);

}

/* verify k_poll() that waits on an object which gets a cancellation */

static struct k_fifo cancel_fifo;
static struct k_fifo non_cancel_fifo;

static void poll_cancel_helper(void *p1, void *p2, void *p3)
{
        (void)p1; (void)p2; (void)p3;

        static struct fifo_msg msg;

        k_sleep(K_MSEC(100));

        k_fifo_cancel_wait(&cancel_fifo);

        k_fifo_alloc_put(&non_cancel_fifo, &msg);
}

/**
 * @brief Test polling of a cancelled FIFO
 *
 * @details Test the FIFO (queue) data-available and cancellation events
 * as events in poll.
 *
 * @ingroup kernel_poll_tests
 *
 * @see k_poll(), k_fifo_cancel_wait(), k_fifo_alloc_put()
 */
void test_poll_cancel(bool is_main_low_prio)
{
        const int main_low_prio = 10;
        int old_prio = k_thread_priority_get(k_current_get());
        int rc;

        struct k_poll_event cancel_events[] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &cancel_fifo),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &non_cancel_fifo),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_IGNORE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         NULL),
        };

        k_fifo_init(&cancel_fifo);
        k_fifo_init(&non_cancel_fifo);

        if (is_main_low_prio) {
                k_thread_priority_set(k_current_get(), main_low_prio);
        }

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack),
                        poll_cancel_helper, (void *)1, 0, 0,
                        main_low_prio - 1, K_USER | K_INHERIT_PERMS,
                        K_NO_WAIT);

        rc = k_poll(cancel_events, ARRAY_SIZE(cancel_events), K_SECONDS(1));

        k_thread_priority_set(k_current_get(), old_prio);

        zassert_equal(rc, -EINTR, "");

        zassert_equal(cancel_events[0].state,
                      K_POLL_STATE_CANCELLED, "");

        if (is_main_low_prio) {
                /* If the poller thread is lower priority than the threads
                 * which generate poll events, it may get multiple poll
                 * events at once.
                 */
                zassert_equal(cancel_events[1].state,
                              K_POLL_STATE_FIFO_DATA_AVAILABLE, "");
        } else {
                /* Otherwise, the poller thread will be woken up on the
                 * first event triggered.
                 */
                zassert_equal(cancel_events[1].state,
                              K_POLL_STATE_NOT_READY, "");
        }
}

void test_poll_cancel_main_low_prio(void)
{
        test_poll_cancel(true);
}

void test_poll_cancel_main_high_prio(void)
{
        test_poll_cancel(false);
}

/* verify multiple pollers */
static K_SEM_DEFINE(multi_sem, 0, 1);

static void multi_lowprio(void *p1, void *p2, void *p3)
{
        (void)p1; (void)p2; (void)p3;

        struct k_poll_event event;
        int rc;

        k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &multi_sem);

        (void)k_poll(&event, 1, K_FOREVER);
        rc = k_sem_take(&multi_sem, K_FOREVER);
        zassert_equal(rc, 0, "");
}

static K_SEM_DEFINE(multi_reply, 0, 1);

static void multi(void *p1, void *p2, void *p3)
{
        (void)p1; (void)p2; (void)p3;

        struct k_poll_event event;

        k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &multi_sem);

        (void)k_poll(&event, 1, K_FOREVER);
        k_sem_take(&multi_sem, K_FOREVER);
        k_sem_give(&multi_reply);
}

static K_SEM_DEFINE(multi_ready_sem, 1, 1);
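
/*
 * multi_ready_sem starts with a count of 1, so its event in
 * test_poll_multi() is already satisfied when k_poll() is called, while
 * multi_sem (initial count 0) has not been given yet and its event is
 * expected to stay K_POLL_STATE_NOT_READY.
 */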

/**
 * @brief Test polling of multiple events
 *
 * @details
 * - Test multiple semaphore events as waitable events in poll.
 *
 * @ingroup kernel_poll_tests
 *
 * @see K_POLL_EVENT_INITIALIZER(), k_poll(), k_poll_event_init()
 */
void test_poll_multi(void)
{
        int old_prio = k_thread_priority_get(k_current_get());
        const int main_low_prio = 10;
        int rc;

        struct k_poll_event events[] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &multi_sem),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &multi_ready_sem),
        };

        k_thread_priority_set(k_current_get(), main_low_prio);

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack),
                        multi, 0, 0, 0, main_low_prio - 1,
                        K_USER | K_INHERIT_PERMS, K_NO_WAIT);

        /*
         * Create an additional thread to add multiple (more than one)
         * pending threads to the event list, improving code coverage.
         */
        k_thread_create(&test_loprio_thread, test_loprio_stack,
                        K_THREAD_STACK_SIZEOF(test_loprio_stack),
                        multi_lowprio, 0, 0, 0, main_low_prio + 1,
                        K_USER | K_INHERIT_PERMS, K_NO_WAIT);

        /* Allow the lower priority thread to add its poll event to the list */
        k_sleep(K_MSEC(250));
        rc = k_poll(events, ARRAY_SIZE(events), K_SECONDS(1));

        zassert_equal(rc, 0, "");
        zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
        zassert_equal(events[1].state, K_POLL_STATE_SEM_AVAILABLE, "");

        /*
         * Free the polling threads, ensuring they wake from k_poll()
         * and take the semaphore.
         */
        k_sem_give(&multi_sem);
        k_sem_give(&multi_sem);
        rc = k_sem_take(&multi_reply, K_SECONDS(1));

        zassert_equal(rc, 0, "");

        /* wait for the polling threads to complete execution */
        k_thread_priority_set(k_current_get(), old_prio);
        k_sleep(K_MSEC(250));
}

static struct k_poll_signal signal;

static void threadstate(void *p1, void *p2, void *p3)
{
        (void)p2; (void)p3;

        k_sleep(K_MSEC(250));
        /* Update polling thread state explicitly to improve code coverage */
        k_thread_suspend(p1);
        /* Enable polling thread by signalling */
        k_poll_signal_raise(&signal, SIGNAL_RESULT);
        k_thread_resume(p1);
}

/**
 * @brief Test polling of events while manipulating the polling thread's state
 *
 * @details
 * - Manipulate the thread state to cover the case where no polling thread
 *   is available during event signalling.
 * - Define a poll signal as a waitable event and verify the result after
 *   the signal is raised.
 *
 * @ingroup kernel_poll_tests
 *
 * @see K_POLL_EVENT_INITIALIZER(), k_poll(), k_poll_signal_init(),
 * k_poll_signal_check(), k_poll_signal_raise()
 */
void test_poll_threadstate(void)
{
        unsigned int signaled;
        const int main_low_prio = 10;
        int result;

        k_poll_signal_init(&signal);

        struct k_poll_event event;

        k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
                          K_POLL_MODE_NOTIFY_ONLY, &signal);

        int old_prio = k_thread_priority_get(k_current_get());

        k_thread_priority_set(k_current_get(), main_low_prio);
        k_tid_t ztest_tid = k_current_get();

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack), threadstate,
                        ztest_tid, 0, 0, main_low_prio - 1, K_INHERIT_PERMS,
                        K_NO_WAIT);

        /* wait for the spawned thread to take action */
        zassert_equal(k_poll(&event, 1, K_SECONDS(1)), 0, "");
        zassert_equal(event.state, K_POLL_STATE_SIGNALED, "");
        k_poll_signal_check(&signal, &signaled, &result);
        zassert_not_equal(signaled, 0, "");
        zassert_equal(result, SIGNAL_RESULT, "");

        event.state = K_POLL_STATE_NOT_READY;
        k_poll_signal_reset(&signal);
        /* teardown */
        k_thread_priority_set(k_current_get(), old_prio);
}

void test_poll_grant_access(void)
{
        k_thread_access_grant(k_current_get(), &no_wait_sem, &no_wait_fifo,
                              &no_wait_signal, &wait_sem, &wait_fifo,
                              &cancel_fifo, &non_cancel_fifo,
                              &wait_signal, &test_thread, &test_signal,
                              &test_stack, &multi_sem, &multi_reply);
}

void test_poll_zero_events(void)
{
        struct k_poll_event event;

        k_sem_init(&zero_events_sem, 1, 1);

        k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, &zero_events_sem);

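        /*
         * With num_events == 0 there is nothing to wait on, so k_poll()
         * should simply wait out the 50 ms timeout and report -EAGAIN,
         * even though the semaphore event initialized above would be ready.
         */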
        zassert_equal(k_poll(&event, 0, K_MSEC(50)), -EAGAIN, NULL);
}

/* subthread entry */
void polling_event(void *p1, void *p2, void *p3)
{
        k_poll(p1, 1, K_FOREVER);
}

/**
 * @brief Detect that is_polling is false in signal_poll_event()
 *
 * @details
 * Define and initialize a signal event, spawn a thread to poll the
 * event, then set dticks to an invalid (expired) value and check the
 * value of is_polling seen by signal_poll_event().
 *
 * @ingroup kernel_poll_tests
 */
void test_detect_is_polling(void)
{
        k_poll_signal_init(&test_signal);

        struct k_thread *p = &test_thread;
        struct k_poll_event events[1] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &test_signal),
        };

        k_thread_create(&test_thread, test_stack,
                        K_THREAD_STACK_SIZEOF(test_stack), polling_event,
                        events, NULL, NULL, K_PRIO_PREEMPT(0),
                        K_INHERIT_PERMS, K_NO_WAIT);

        /* Force the polling thread's timeout to look already expired,
         * to exercise the path where dticks is invalid.
         */
        p->base.timeout.dticks = _EXPIRED;
        /* Wait for the event to be registered */
        k_sleep(K_MSEC(50));

        /* Raise a signal */
        int ret = k_poll_signal_raise(&test_signal, 0x1337);

        zassert_true(ret == -EAGAIN,
                     "k_poll_signal_raise() should fail for an expired poller\n");
        zassert_true(events[0].poller->is_polling == false,
                     "the value of is_polling is invalid\n");
}