1 /*
2  * Copyright (c) 2017 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include "test_queue.h"
8 
9 #define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
10 #define LIST_LEN 2
11 /**TESTPOINT: init via K_QUEUE_DEFINE*/
12 K_QUEUE_DEFINE(kqueue);
13 
14 K_HEAP_DEFINE(mem_pool_fail, 8 + 128);
15 K_HEAP_DEFINE(mem_pool_pass, 64 * 4 + 128);
16 
17 struct k_queue queue;
18 static qdata_t data[LIST_LEN];
19 static qdata_t data_p[LIST_LEN];
20 static qdata_t data_l[LIST_LEN];
21 static qdata_t data_sl[LIST_LEN];
22 
23 static qdata_t *data_append;
24 static qdata_t *data_prepend;
25 
26 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
27 static struct k_thread tdata;
28 static K_THREAD_STACK_DEFINE(tstack1, STACK_SIZE);
29 static struct k_thread tdata1;
30 static K_THREAD_STACK_DEFINE(tstack2, STACK_SIZE);
31 static struct k_thread tdata2;
32 static struct k_sem end_sema;
33 
tqueue_append(struct k_queue * pqueue)34 static void tqueue_append(struct k_queue *pqueue)
35 {
36 	k_queue_insert(pqueue, k_queue_peek_tail(pqueue),
37 		       (void *)&data[0]);
38 
39 	for (int i = 1; i < LIST_LEN; i++) {
40 		/**TESTPOINT: queue append */
41 		k_queue_append(pqueue, (void *)&data[i]);
42 	}
43 
44 	for (int i = LIST_LEN - 1; i >= 0; i--) {
45 		/**TESTPOINT: queue prepend */
46 		k_queue_prepend(pqueue, (void *)&data_p[i]);
47 	}
48 
49 	/**TESTPOINT: queue append list*/
50 	static qdata_t *head = &data_l[0], *tail = &data_l[LIST_LEN - 1];
51 
52 	head->snode.next = (sys_snode_t *)tail;
53 	tail->snode.next = NULL;
54 	k_queue_append_list(pqueue, (uint32_t *)head, (uint32_t *)tail);
55 
56 	/**TESTPOINT: queue merge slist*/
57 	sys_slist_t slist;
58 
59 	sys_slist_init(&slist);
60 	sys_slist_append(&slist, (sys_snode_t *)&(data_sl[0].snode));
61 	sys_slist_append(&slist, (sys_snode_t *)&(data_sl[1].snode));
62 	k_queue_merge_slist(pqueue, &slist);
63 }
64 
tqueue_get(struct k_queue * pqueue)65 static void tqueue_get(struct k_queue *pqueue)
66 {
67 	void *rx_data;
68 
69 	/*get queue data from "queue_prepend"*/
70 	for (int i = 0; i < LIST_LEN; i++) {
71 		/**TESTPOINT: queue get*/
72 		rx_data = k_queue_get(pqueue, K_NO_WAIT);
73 		zassert_equal(rx_data, (void *)&data_p[i]);
74 	}
75 	/*get queue data from "queue_append"*/
76 	for (int i = 0; i < LIST_LEN; i++) {
77 		/**TESTPOINT: queue get*/
78 		rx_data = k_queue_get(pqueue, K_NO_WAIT);
79 		zassert_equal(rx_data, (void *)&data[i]);
80 	}
81 	/*get queue data from "queue_append_list"*/
82 	for (int i = 0; i < LIST_LEN; i++) {
83 		rx_data = k_queue_get(pqueue, K_NO_WAIT);
84 		zassert_equal(rx_data, (void *)&data_l[i]);
85 	}
86 	/*get queue data from "queue_merge_slist"*/
87 	for (int i = 0; i < LIST_LEN; i++) {
88 		rx_data = k_queue_get(pqueue, K_NO_WAIT);
89 		zassert_equal(rx_data, (void *)&data_sl[i]);
90 	}
91 }
92 
93 /*entry of contexts*/
/* ISR-context entry (via irq_offload): producer side of ISR tests. */
static void tIsr_entry_append(const void *p)
{
	struct k_queue *pqueue = (struct k_queue *)p;

	tqueue_append(pqueue);
}
98 
/* ISR-context entry (via irq_offload): consumer side of ISR tests. */
static void tIsr_entry_get(const void *p)
{
	struct k_queue *pqueue = (struct k_queue *)p;

	tqueue_get(pqueue);
}
103 
tThread_entry(void * p1,void * p2,void * p3)104 static void tThread_entry(void *p1, void *p2, void *p3)
105 {
106 	tqueue_get((struct k_queue *)p1);
107 	k_sem_give(&end_sema);
108 }
109 
tqueue_thread_thread(struct k_queue * pqueue)110 static void tqueue_thread_thread(struct k_queue *pqueue)
111 {
112 	k_sem_init(&end_sema, 0, 1);
113 	/**TESTPOINT: thread-thread data passing via queue*/
114 	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
115 				      tThread_entry, pqueue, NULL, NULL,
116 				      K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
117 	tqueue_append(pqueue);
118 	k_sem_take(&end_sema, K_FOREVER);
119 	k_thread_abort(tid);
120 }
121 
tqueue_thread_isr(struct k_queue * pqueue)122 static void tqueue_thread_isr(struct k_queue *pqueue)
123 {
124 	k_sem_init(&end_sema, 0, 1);
125 	/**TESTPOINT: thread-isr data passing via queue*/
126 	irq_offload(tIsr_entry_append, (const void *)pqueue);
127 	tqueue_get(pqueue);
128 }
129 
tqueue_isr_thread(struct k_queue * pqueue)130 static void tqueue_isr_thread(struct k_queue *pqueue)
131 {
132 	k_sem_init(&end_sema, 0, 1);
133 	/**TESTPOINT: isr-thread data passing via queue*/
134 	tqueue_append(pqueue);
135 	irq_offload(tIsr_entry_get, (const void *)pqueue);
136 }
137 
138 /*test cases*/
139 /**
140  * @brief Verify data passing between threads using queue
141  *
142  * @details Static define and Dynamic define queues,
143  * Then initialize them.
144  * Create a new thread to wait for reading data.
145  * Current thread will append item into queue.
146  * Verify if rx_data is equal insert-data address.
147  * Verify queue can be define at compile time.
148  *
149  * @ingroup kernel_queue_tests
150  *
151  * @see k_queue_init(), k_queue_insert(), k_queue_append()
152  * K_THREAD_STACK_DEFINE()
153  */
ZTEST(queue_api_1cpu,test_queue_thread2thread)154 ZTEST(queue_api_1cpu, test_queue_thread2thread)
155 {
156 	/**TESTPOINT: init via k_queue_init*/
157 	k_queue_init(&queue);
158 	tqueue_thread_thread(&queue);
159 
160 	/**TESTPOINT: test K_QUEUE_DEFINEed queue*/
161 	tqueue_thread_thread(&kqueue);
162 }
163 
164 /**
165  * @brief Verify data passing between thread and ISR
166  *
167  * @details Create a new ISR to insert data
168  * And current thread is used for getting data
169  * Verify if the rx_data is equal insert-data address.
170  * If the received data address is the same as
171  * the created array, prove that the queue data structures
172  * are stored within the provided data items.
173  *
174  * @ingroup kernel_queue_tests
175  *
176  * @see k_queue_init(), k_queue_insert(), k_queue_append()
177  */
ZTEST(queue_api,test_queue_thread2isr)178 ZTEST(queue_api, test_queue_thread2isr)
179 {
180 	/**TESTPOINT: init via k_queue_init*/
181 	k_queue_init(&queue);
182 	tqueue_thread_isr(&queue);
183 
184 	/**TESTPOINT: test K_QUEUE_DEFINEed queue*/
185 	tqueue_thread_isr(&kqueue);
186 }
187 
188 /**
189  * @brief Verify data passing between ISR and thread
190  *
191  * @details Create a new ISR and ready for getting data
192  * And current thread is used for inserting data
193  * Verify if the rx_data is equal insert-data address.
194  *
195  * @ingroup kernel_queue_tests
196  *
197  * @see k_queue_init(), k_queue_insert(), k_queue_get(),
198  * k_queue_append(), k_queue_remove()
199  */
ZTEST(queue_api,test_queue_isr2thread)200 ZTEST(queue_api, test_queue_isr2thread)
201 {
202 	/**TESTPOINT: test k_queue_init queue*/
203 	k_queue_init(&queue);
204 	tqueue_isr_thread(&queue);
205 
206 	/**TESTPOINT: test K_QUEUE_DEFINE queue*/
207 	tqueue_isr_thread(&kqueue);
208 }
209 
tThread_get(void * p1,void * p2,void * p3)210 static void tThread_get(void *p1, void *p2, void *p3)
211 {
212 	zassert_true(k_queue_get((struct k_queue *)p1, K_FOREVER) != NULL,
213 		     NULL);
214 	k_sem_give(&end_sema);
215 }
216 
tqueue_get_2threads(struct k_queue * pqueue)217 static void tqueue_get_2threads(struct k_queue *pqueue)
218 {
219 	k_sem_init(&end_sema, 0, 1);
220 	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
221 				      tThread_get, pqueue, NULL, NULL,
222 				      K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
223 
224 	k_tid_t tid1 = k_thread_create(&tdata1, tstack1, STACK_SIZE,
225 				       tThread_get, pqueue, NULL, NULL,
226 				       K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
227 
228 	/* Wait threads to initialize */
229 	k_sleep(K_MSEC(10));
230 
231 	k_queue_append(pqueue, (void *)&data[0]);
232 	k_queue_append(pqueue, (void *)&data[1]);
233 	/* Wait threads to finalize */
234 	k_sem_take(&end_sema, K_FOREVER);
235 	k_sem_take(&end_sema, K_FOREVER);
236 
237 	k_thread_abort(tid);
238 	k_thread_abort(tid1);
239 }
240 
241 /**
242  * @brief Verify k_queue_get()
243  * @ingroup kernel_queue_tests
244  * @see k_queue_init(), k_queue_get(),
245  * k_queue_append(), k_queue_alloc_prepend()
246  */
ZTEST(queue_api_1cpu,test_queue_get_2threads)247 ZTEST(queue_api_1cpu, test_queue_get_2threads)
248 {
249 	/**TESTPOINT: test k_queue_init queue*/
250 	k_queue_init(&queue);
251 
252 	tqueue_get_2threads(&queue);
253 }
254 
/* Exercise the allocating queue APIs (k_queue_alloc_append/prepend)
 * against three resource-pool configurations, in order: no pool,
 * a too-small pool, and a sufficient pool.  The step order matters:
 * each stage's zassert depends on the pool assigned just before it.
 */
static void tqueue_alloc(struct k_queue *pqueue)
{
	/* Detach any resource pool from the current thread */
	k_thread_heap_assign(k_current_get(), NULL);

	/* Alloc append without resource pool */
	k_queue_alloc_append(pqueue, (void *)&data_append);

	/* Insertion fails and alloc returns NOMEM; k_queue_remove()
	 * returning false proves the item never made it into the queue.
	 */
	zassert_false(k_queue_remove(pqueue, &data_append));

	/* Assign resource pool of lower size */
	k_thread_heap_assign(k_current_get(), &mem_pool_fail);

	/* Prepend to the queue, but fails because of
	 * insufficient memory
	 */
	k_queue_alloc_prepend(pqueue, (void *)&data_prepend);

	/* Again, remove failing confirms the prepend was not applied */
	zassert_false(k_queue_remove(pqueue, &data_prepend));

	/* No element must be present in the queue, as all
	 * operations failed
	 */
	zassert_true(k_queue_is_empty(pqueue));

	/* Assign resource pool of sufficient size */
	k_thread_heap_assign(k_current_get(), &mem_pool_pass);

	/* With enough memory the alloc_prepend must succeed (returns 0) */
	zassert_false(k_queue_alloc_prepend(pqueue, (void *)&data_prepend),
		      NULL);

	/* Now queue shouldn't be empty */
	zassert_false(k_queue_is_empty(pqueue));

	/* Drain the single queued item; must be non-NULL */
	zassert_true(k_queue_get(pqueue, K_FOREVER) != NULL,
		     NULL);
}
292 
293 /**
294  * @brief Test queue alloc append and prepend
295  * @ingroup kernel_queue_tests
296  * @see k_queue_alloc_append(), k_queue_alloc_prepend(),
297  * k_thread_heap_assign(), k_queue_is_empty(),
298  * k_queue_get(), k_queue_remove()
299  */
ZTEST(queue_api,test_queue_alloc)300 ZTEST(queue_api, test_queue_alloc)
301 {
302 	/* The mem_pool_fail pool is supposed to be too small to
303 	 * succeed any allocations, but in fact with the heap backend
304 	 * there's some base minimal memory in there that can be used.
305 	 * Make sure it's really truly full.
306 	 */
307 	while (k_heap_alloc(&mem_pool_fail, 1, K_NO_WAIT) != NULL) {
308 	}
309 
310 	k_queue_init(&queue);
311 
312 	tqueue_alloc(&queue);
313 }
314 
315 
316 /* Does nothing but read items out of the queue and verify that they
317  * are non-null.  Two such threads will be created.
318  */
queue_poll_race_consume(void * p1,void * p2,void * p3)319 static void queue_poll_race_consume(void *p1, void *p2, void *p3)
320 {
321 	struct k_queue *q = p1;
322 	int *count = p2;
323 
324 	while (true) {
325 		zassert_true(k_queue_get(q, K_FOREVER) != NULL);
326 		*count += 1;
327 	}
328 }
329 
330 /* There was a historical race in the queue internals when CONFIG_POLL
331  * was enabled -- it was possible to wake up a lower priority thread
332  * with an insert but then steal it with a higher priority thread
333  * before it got a chance to run, and the lower priority thread would
334  * then return NULL before its timeout expired.
335  */
/* Regression test for a wakeup-steal race (see comment above): two
 * consumers at different priorities block on the queue; two inserts
 * must release exactly two gets in total, with neither consumer
 * spuriously returning NULL.  The sleeps and relative priorities are
 * part of the test choreography -- do not reorder.
 */
ZTEST(queue_api_1cpu, test_queue_poll_race)
{
	int prio = k_thread_priority_get(k_current_get());
	/* Per-consumer item counters, incremented by the consumer threads */
	static volatile int mid_count, low_count;

	k_queue_init(&queue);

	/* Mid-priority consumer (prio + 1) */
	k_thread_create(&tdata, tstack, STACK_SIZE,
			queue_poll_race_consume,
			&queue, (void *)&mid_count, NULL,
			prio + 1, 0, K_NO_WAIT);

	/* Low-priority consumer (prio + 2) */
	k_thread_create(&tdata1, tstack1, STACK_SIZE,
			queue_poll_race_consume,
			&queue, (void *)&low_count, NULL,
			prio + 2, 0, K_NO_WAIT);

	/* Let them initialize and block */
	k_sleep(K_MSEC(10));

	/* Insert two items.  This will wake up both threads, but the
	 * higher priority thread (tdata1) might (if CONFIG_POLL)
	 * consume both.  The lower priority thread should stay
	 * asleep.
	 */
	k_queue_append(&queue, &data[0]);
	k_queue_append(&queue, &data[1]);

	/* Consumers have not run yet: this thread still holds the CPU */
	zassert_true(low_count == 0);
	zassert_true(mid_count == 0);

	k_sleep(K_MSEC(10));

	/* Both items consumed, regardless of which consumer got them */
	zassert_true(low_count + mid_count == 2);

	k_thread_abort(&tdata);
	k_thread_abort(&tdata1);
}
374 
375 /**
376  * @brief Verify that multiple queues can be defined
377  * simultaneously
378  *
379  * @details define multiple queues to verify
380  * they can work.
381  *
382  * @ingroup kernel_queue_tests
383  *
384  * @see k_queue_init()
385  */
386 #define QUEUE_NUM 10
ZTEST(queue_api,test_multiple_queues)387 ZTEST(queue_api, test_multiple_queues)
388 {
389 	/*define multiple queues*/
390 	static struct k_queue queues[QUEUE_NUM];
391 
392 	for (int i = 0; i < QUEUE_NUM; i++) {
393 		k_queue_init(&queues[i]);
394 
395 		/*Indicating that they are working*/
396 		tqueue_append(&queues[i]);
397 		tqueue_get(&queues[i]);
398 	}
399 }
400 
/* User-mode thread entry: deliberately touches the kernel-private
 * `queue` object without permission.  ztest_set_fault_valid(true)
 * arms the harness so the resulting kernel oops counts as a pass.
 */
void user_access_queue_private_data(void *p1, void *p2, void *p3)
{
	ztest_set_fault_valid(true);
	/* try to access to private kernel data, will happen kernel oops */
	k_queue_is_empty(&queue);
}
407 
408 /**
409  * @brief Test access kernel object with private data using system call
410  *
411  * @details
412  * - When defining system calls, it is very important to ensure that
413  *   access to the API’s private data is done exclusively through system call
414  *   interfaces. Private kernel data should never be made available to user mode
415  *   threads directly. For example, the k_queue APIs were intentionally not made
416  *   available as they store bookkeeping information about the queue directly
417  *   in the queue buffers which are visible from user mode.
418  * - Current test makes user thread try to access private kernel data within
419  *   their associated data structures. Kernel will track that system call
420  *   access to these object with the kernel object permission system.
421  *   Current user thread doesn't have permission on it, trying to access
422  *   &pqueue kernel object will happen kernel oops, because current user
423  *   thread doesn't have permission on k_queue object with private kernel data.
424  *
425  * @ingroup kernel_memprotect_tests
426  */
ZTEST(queue_api,test_access_kernel_obj_with_priv_data)427 ZTEST(queue_api, test_access_kernel_obj_with_priv_data)
428 {
429 	k_queue_init(&queue);
430 	k_queue_insert(&queue, k_queue_peek_tail(&queue), (void *)&data[0]);
431 	k_thread_create(&tdata, tstack, STACK_SIZE, user_access_queue_private_data,
432 					NULL, NULL, NULL, 0, K_USER, K_NO_WAIT);
433 	k_thread_join(&tdata, K_FOREVER);
434 }
435 
low_prio_wait_for_queue(void * p1,void * p2,void * p3)436 static void low_prio_wait_for_queue(void *p1, void *p2, void *p3)
437 {
438 	struct k_queue *q = p1;
439 	uint32_t *ret = NULL;
440 
441 	ret = k_queue_get(q, K_FOREVER);
442 	zassert_true(*ret == 0xccc,
443 	"The low priority thread get the queue data failed lastly");
444 }
445 
high_prio_t1_wait_for_queue(void * p1,void * p2,void * p3)446 static void high_prio_t1_wait_for_queue(void *p1, void *p2, void *p3)
447 {
448 	struct k_queue *q = p1;
449 	uint32_t *ret = NULL;
450 
451 	ret = k_queue_get(q, K_FOREVER);
452 	zassert_true(*ret == 0xaaa,
453 	"The highest priority and waited longest get the queue data failed firstly");
454 }
455 
high_prio_t2_wait_for_queue(void * p1,void * p2,void * p3)456 static void high_prio_t2_wait_for_queue(void *p1, void *p2, void *p3)
457 {
458 	struct k_queue *q = p1;
459 	uint32_t *ret = NULL;
460 
461 	ret = k_queue_get(q, K_FOREVER);
462 	zassert_true(*ret == 0xbbb,
463 	"The higher priority and waited longer get the queue data failed secondly");
464 }
465 
466 /**
467  * @brief Test multi-threads to get data from a queue.
468  *
469  * @details Define three threads, and set a higher priority for two of them,
470  * and set a lower priority for the last one. Then Add a delay between
471  * creating the two high priority threads.
472  * Test point:
473  * 1. Any number of threads may wait on an empty FIFO simultaneously.
474  * 2. When a data item is added, it is given to the highest priority
475  * thread that has waited longest.
476  *
477  * @ingroup kernel_queue_tests
478  */
/* Wakeup-order test: three waiters block on an empty queue; the data
 * items must be handed out highest-priority-first, with wait time
 * breaking ties.  Thread priorities and sleep points are part of the
 * choreography -- do not reorder statements.
 */
ZTEST(queue_api_1cpu, test_queue_multithread_competition)
{
	int old_prio = k_thread_priority_get(k_current_get());
	/* Main thread runs at prio 10; all waiters are created lower
	 * (numerically higher) so they block until we append data.
	 */
	int prio = 10;
	uint32_t test_data[3];

	memset(test_data, 0, sizeof(test_data));
	k_thread_priority_set(k_current_get(), prio);
	k_queue_init(&queue);
	zassert_true(k_queue_is_empty(&queue) != 0, " Initializing queue failed");

	/* Set up some values */
	test_data[0] = 0xAAA;
	test_data[1] = 0xBBB;
	test_data[2] = 0xCCC;

	/* Low-priority waiter (prio + 4): expects 0xCCC last */
	k_thread_create(&tdata, tstack, STACK_SIZE,
			low_prio_wait_for_queue,
			&queue, NULL, NULL,
			prio + 4, 0, K_NO_WAIT);

	/* High-priority waiter, longest wait (prio + 2): expects 0xAAA first */
	k_thread_create(&tdata1, tstack1, STACK_SIZE,
			high_prio_t1_wait_for_queue,
			&queue, NULL, NULL,
			prio + 2, 0, K_NO_WAIT);

	/* Make thread tdata and tdata1 wait more time */
	k_sleep(K_MSEC(10));

	/* Second high-priority waiter (prio + 2), shorter wait: expects 0xBBB */
	k_thread_create(&tdata2, tstack2, STACK_SIZE,
			high_prio_t2_wait_for_queue,
			&queue, NULL, NULL,
			prio + 2, 0, K_NO_WAIT);

	/* Initialize them and block */
	k_sleep(K_MSEC(50));

	/* Insert some data to wake up thread */
	k_queue_append(&queue, &test_data[0]);
	k_queue_append(&queue, &test_data[1]);
	k_queue_append(&queue, &test_data[2]);

	/* Wait for thread exiting */
	k_thread_join(&tdata, K_FOREVER);
	k_thread_join(&tdata1, K_FOREVER);
	k_thread_join(&tdata2, K_FOREVER);

	/* Revert priority of the main thread */
	k_thread_priority_set(k_current_get(), old_prio);
}
529 
530 /**
531  * @brief Verify k_queue_unique_append()
532  *
533  * @ingroup kernel_queue_tests
534  *
535  * @details Append the same data to the queue repeatedly,
536  * see if it returns expected value.
537  * And verify operation succeed if append different data to
538  * the queue.
539  *
540  * @see k_queue_unique_append()
541  */
ZTEST(queue_api,test_queue_unique_append)542 ZTEST(queue_api, test_queue_unique_append)
543 {
544 	bool ret;
545 
546 	k_queue_init(&queue);
547 	ret = k_queue_unique_append(&queue, (void *)&data[0]);
548 	zassert_true(ret, "queue unique append failed");
549 
550 	ret = k_queue_unique_append(&queue, (void *)&data[0]);
551 	zassert_false(ret, "queue unique append should fail");
552 
553 	ret = k_queue_unique_append(&queue, (void *)&data[1]);
554 	zassert_true(ret, "queue unique append failed");
555 }
556