/*
 * Copyright (c) 2012-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * @brief test context and thread APIs
 *
 * @defgroup kernel_context_tests Context Tests
 *
 * @ingroup all_tests
 *
 * This module tests the following CPU and thread related routines:
 * k_thread_create(), k_yield(), k_is_in_isr(),
 * k_current_get(), k_cpu_idle(), k_cpu_atomic_idle(),
 * irq_lock(), irq_unlock(),
 * irq_offload(), irq_enable(), irq_disable(),
 * @{
 * @}
 */

#include <stdlib.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys_clock.h>

#if defined(CONFIG_SOC_POSIX)
/* TIMER_TICK_IRQ is defined in the <soc.h> header for certain platforms */
#include <soc.h>
#endif

#define THREAD_STACKSIZE    (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define THREAD_STACKSIZE2   (384 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define THREAD_PRIORITY     4

#define THREAD_SELF_CMD    0
#define EXEC_CTX_TYPE_CMD  1

#define UNKNOWN_COMMAND    -1
#define INVALID_BEHAVIOUR  -2

/*
 * Get the timer-type-dependent IRQ number. If the timer type is not
 * defined for the platform, generate an error.
 */

#if defined(CONFIG_APIC_TSC_DEADLINE_TIMER) || defined(CONFIG_APIC_TIMER_TSC)
#define TICK_IRQ z_loapic_irq_base() /* first LVT interrupt */
#elif defined(CONFIG_CPU_CORTEX_M)
/*
 * Cortex-M parts use the SYSTICK exception for the system timer, which is
 * not considered an IRQ by the irq_enable()/irq_disable() APIs.
 */
#elif defined(CONFIG_SPARC)
#elif defined(CONFIG_MIPS)
#elif defined(CONFIG_ARCH_POSIX)
#if defined(CONFIG_BOARD_NATIVE_POSIX) || defined(CONFIG_BOARD_NATIVE_SIM)
#define TICK_IRQ TIMER_TICK_IRQ
#else
/*
 * Other POSIX arch boards will skip the irq_disable() and irq_enable() test
 * unless TICK_IRQ is defined here for them
 */
#endif /* CONFIG_BOARD_NATIVE_POSIX || CONFIG_BOARD_NATIVE_SIM */
#else

extern const int32_t z_sys_timer_irq_for_test;
#define TICK_IRQ (z_sys_timer_irq_for_test)

#endif
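
/*
 * Boards and architectures that leave TICK_IRQ undefined simply skip the
 * irq_disable()/irq_enable() timer test (see test_timer_interrupts() below).
 */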

/* Cortex-M1 and Nios II do not have a power saving instruction, so k_cpu_idle()
 * returns immediately
 */
#if !defined(CONFIG_CPU_CORTEX_M1) && !defined(CONFIG_NIOS2)
#define HAS_POWERSAVE_INSTRUCTION
#endif



typedef struct {
	int command;            /* command to process   */
	int error;              /* error value (if any) */
	union {
		void *data;     /* pointer to data to use or return */
		int value;      /* value to be passed or returned   */
	};
} ISR_INFO;
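
/*
 * Typical usage, as a rough sketch: a test thread fills in isr_info and then
 * triggers the offloaded ISR, which writes its reply back into the union:
 *
 *   isr_info.command = EXEC_CTX_TYPE_CMD;
 *   isr_info.error = 0;
 *   isr_handler_trigger();
 *   zassert_equal(isr_info.value, K_ISR, "not executed in ISR context");
 */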


typedef int (*disable_int_func) (int);
typedef void (*enable_int_func) (int);

static struct k_sem sem_thread;
static struct k_timer timer;
static struct k_sem reply_timeout;
struct k_fifo timeout_order_fifo;

static int thread_evidence;

static K_THREAD_STACK_DEFINE(thread_stack1, THREAD_STACKSIZE);
static K_THREAD_STACK_DEFINE(thread_stack2, THREAD_STACKSIZE);
static K_THREAD_STACK_DEFINE(thread_stack3, THREAD_STACKSIZE);
static struct k_thread thread_data1;
static struct k_thread thread_data2;
static struct k_thread thread_data3;

static ISR_INFO isr_info;

/**
 * @brief Handler to perform various actions from within an ISR context
 *
 * This routine is the ISR handler for isr_handler_trigger(). It performs
 * the command requested in <isr_info.command>.
 */
static void isr_handler(const void *data)
{
	ARG_UNUSED(data);

	if (k_can_yield()) {
		isr_info.error = INVALID_BEHAVIOUR;
	}

	switch (isr_info.command) {
	case THREAD_SELF_CMD:
		isr_info.data = (void *)k_current_get();
		break;

	case EXEC_CTX_TYPE_CMD:
		if (k_is_in_isr()) {
			isr_info.value = K_ISR;
			break;
		}

		if (arch_current_thread()->base.prio < 0) {
			isr_info.value = K_COOP_THREAD;
			break;
		}

		isr_info.value = K_PREEMPT_THREAD;

		break;

	default:
		isr_info.error = UNKNOWN_COMMAND;
		break;
	}
}

static void isr_handler_trigger(void)
{
	irq_offload(isr_handler, NULL);
}
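
/*
 * irq_offload() runs isr_handler() synchronously in interrupt context, so
 * isr_info has already been updated when isr_handler_trigger() returns.
 */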

/**
 *
 * @brief Initialize kernel objects
 *
 * This routine initializes the kernel objects used in this module's tests.
 *
 */
static void kernel_init_objects(void)
{
	k_sem_init(&reply_timeout, 0, UINT_MAX);
	k_timer_init(&timer, NULL, NULL);
	k_fifo_init(&timeout_order_fifo);
}

/**
 * @brief A wrapper for irq_lock()
 *
 * @return irq_lock() return value
 */
int irq_lock_wrapper(int unused)
{
	ARG_UNUSED(unused);

	return irq_lock();
}

/**
 * @brief A wrapper for irq_unlock()
 */
void irq_unlock_wrapper(int imask)
{
	irq_unlock(imask);
}

/**
 * @brief A wrapper for irq_disable()
 *
 * @return @a irq
 */
int irq_disable_wrapper(int irq)
{
	irq_disable(irq);
	return irq;
}

/**
 * @brief A wrapper for irq_enable()
 */
void irq_enable_wrapper(int irq)
{
	irq_enable(irq);
}
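
/*
 * The wrappers above exist only to give irq_lock()/irq_unlock() and
 * irq_disable()/irq_enable() the common disable_int_func/enable_int_func
 * signatures expected by _test_kernel_interrupts().
 */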

#if defined(HAS_POWERSAVE_INSTRUCTION)
#if defined(CONFIG_TICKLESS_KERNEL)
static struct k_timer idle_timer;

static volatile bool idle_timer_done;

static void idle_timer_expiry_function(struct k_timer *timer_id)
{
	k_timer_stop(&idle_timer);
	idle_timer_done = true;
}

static void _test_kernel_cpu_idle(int atomic)
{
	uint64_t t0, dt;
	unsigned int i, key;
	uint32_t dur = k_ms_to_ticks_ceil32(10);
	uint32_t slop = 1 + k_ms_to_ticks_ceil32(1);
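	/*
	 * dur is the idle period programmed into the wakeup timer (10 ms in
	 * ticks); slop allows roughly one tick of rounding plus 1 ms of
	 * wakeup latency when checking how long the CPU actually idled.
	 */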
	int idle_loops;

	/* Set up a timer to trigger events to exit idle mode */
	k_timer_init(&idle_timer, idle_timer_expiry_function, NULL);

	for (i = 0; i < 5; i++) {
		k_usleep(1);
		t0 = k_uptime_ticks();
		idle_loops = 0;
		idle_timer_done = false;
		k_timer_start(&idle_timer, K_TICKS(dur), K_NO_WAIT);
		key = irq_lock();
		do {
			if (atomic) {
				k_cpu_atomic_idle(key);
			} else {
				k_cpu_idle();
			}
		} while ((idle_loops++ < CONFIG_MAX_IDLE_WAKES) && (idle_timer_done == false));
		zassert_true(idle_timer_done,
			     "The CPU was woken spuriously too many times (%d > %d)",
			     idle_loops, CONFIG_MAX_IDLE_WAKES);
		dt = k_uptime_ticks() - t0;
		zassert_true(abs((int32_t) (dt - dur)) <= slop,
			     "Inaccurate wakeup, idled for %d ticks, expected %d",
			     dt, dur);
	}
}

#else /* CONFIG_TICKLESS_KERNEL */
static void _test_kernel_cpu_idle(int atomic)
{
	int tms, tms2;
	int i;

	/* Align to a "ms boundary". */
	tms = k_uptime_get_32();
	while (tms == k_uptime_get_32()) {
		Z_SPIN_DELAY(50);
	}

	tms = k_uptime_get_32();
	for (i = 0; i < 5; i++) { /* Repeat the test five times */
		if (atomic) {
			unsigned int key = irq_lock();

			k_cpu_atomic_idle(key);
		} else {
			k_cpu_idle();
		}
		/* calculating milliseconds per tick */
		tms += k_ticks_to_ms_floor64(1);
		tms2 = k_uptime_get_32();
		zassert_false(tms2 < tms, "Bad ms per tick value computed, "
			      "got %d which is less than %d\n",
			      tms2, tms);
	}
}
#endif /* CONFIG_TICKLESS_KERNEL */

/**
 * @brief Test cpu idle function
 *
 * @details
 * Test Objective:
 * - The kernel architecture provides an idle function to be run when the
 *   system has no work for the current CPU
 * - This routine tests the k_cpu_atomic_idle() routine
 *
 * Testing techniques
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - HAS_POWERSAVE_INSTRUCTION is set
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Record system time before cpu enters idle state
 * -# Enter cpu idle state by k_cpu_atomic_idle()
 * -# Record system time after cpu idle state is interrupted
 * -# Compare the two system time values.
 *
 * Expected Test Result:
 * - cpu enters idle state for a given time
 *
 * Pass/Fail criteria:
 * - Success if the cpu enters idle state, failure otherwise.
 *
 * Assumptions and Constraints
 * - N/A
 *
 * @see k_cpu_atomic_idle()
 * @ingroup kernel_context_tests
 */
ZTEST(context_cpu_idle, test_cpu_idle_atomic)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	ztest_test_skip();
#else
	_test_kernel_cpu_idle(1);
#endif
}

/**
 * @brief Test cpu idle function
 *
 * @details
 * Test Objective:
 * - The kernel architecture provides an idle function to be run when the
 *   system has no work for the current CPU
 * - This routine tests the k_cpu_idle() routine
 *
 * Testing techniques
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - HAS_POWERSAVE_INSTRUCTION is set
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Record system time before cpu enters idle state
 * -# Enter cpu idle state by k_cpu_idle()
 * -# Record system time after cpu idle state is interrupted
 * -# Compare the two system time values.
 *
 * Expected Test Result:
 * - cpu enters idle state for a given time
 *
 * Pass/Fail criteria:
 * - Success if the cpu enters idle state, failure otherwise.
 *
 * Assumptions and Constraints
 * - N/A
 *
 * @see k_cpu_idle()
 * @ingroup kernel_context_tests
 */
ZTEST(context_cpu_idle, test_cpu_idle)
{
	_test_kernel_cpu_idle(0);
}

#else /* HAS_POWERSAVE_INSTRUCTION */
ZTEST(context_cpu_idle, test_cpu_idle)
{
	ztest_test_skip();
}
ZTEST(context_cpu_idle, test_cpu_idle_atomic)
{
	ztest_test_skip();
}
#endif

static void _test_kernel_interrupts(disable_int_func disable_int,
				    enable_int_func enable_int, int irq)
{
	unsigned long long count = 1ull;
	unsigned long long i = 0;
	int tick;
	int tick2;
	int imask;

	/* Align to a "tick boundary" */
	tick = sys_clock_tick_get_32();
	while (sys_clock_tick_get_32() == tick) {
		Z_SPIN_DELAY(1000);
	}

	tick++;
	while (sys_clock_tick_get_32() == tick) {
		Z_SPIN_DELAY(1000);
		count++;
	}

	/*
	 * Inflate <count> so that when we loop later, many ticks should have
	 * elapsed during the loop. This later loop will not exactly match the
	 * previous loop, but it should be close enough in structure that when
	 * combined with the inflated count, many ticks will have passed.
	 */

	count <<= 4;

	imask = disable_int(irq);
	tick = sys_clock_tick_get_32();
	for (i = 0; i < count; i++) {
		sys_clock_tick_get_32();
		Z_SPIN_DELAY(1000);
	}

	tick2 = sys_clock_tick_get_32();

	/*
	 * Re-enable interrupts before returning (for both success and failure
	 * cases).
	 */
	enable_int(imask);

	/* In TICKLESS, current time is retrieved from a hardware
	 * counter and ticks DO advance with interrupts locked!
	 */
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		zassert_equal(tick2, tick,
			      "tick advanced with interrupts locked");
	}

	/* Now repeat with interrupts unlocked. */
	for (i = 0; i < count; i++) {
		sys_clock_tick_get_32();
		Z_SPIN_DELAY(1000);
	}

	tick2 = sys_clock_tick_get_32();
	zassert_not_equal(tick, tick2,
			  "tick didn't advance as expected");
}

/**
 * @brief Test routines for disabling and enabling interrupts
 *
 * @ingroup kernel_context_tests
 *
 * @details
 * Test Objective:
 * - To verify that the kernel architecture layer provides a mechanism to
 *   mask all local CPU interrupts and return the previous mask state for
 *   restoration.
 * - This routine tests the routines for disabling and enabling interrupts.
 *   These include irq_lock() and irq_unlock().
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_TICKLESS_KERNEL is not set.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Do action to align to a tick boundary.
 * -# Left shift 4 bits for the value of counts.
 * -# Call irq_lock() and store its return value to imask.
 * -# Call sys_clock_tick_get_32() and store its return value to tick.
 * -# Repeat counts of calling sys_clock_tick_get_32().
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Call irq_unlock() with parameter imask.
 * -# Check if tick is equal to tick2.
 * -# Repeat counts of calling sys_clock_tick_get_32().
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Check if tick is NOT equal to tick2.
 *
 * Expected Test Result:
 * - The ticks shall not increase while interrupts are locked.
 *
 * Pass/Fail Criteria:
 * - Successful if check points in test procedure are all passed, otherwise
 *   failure.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see irq_lock(), irq_unlock()
 */
ZTEST(context, test_interrupts)
{
	/* IRQ locks don't prevent ticks from advancing in tickless mode */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		ztest_test_skip();
	}

	_test_kernel_interrupts(irq_lock_wrapper, irq_unlock_wrapper, -1);
}

/**
 * @brief Test routines for disabling and enabling interrupts (disable timer)
 *
 * @ingroup kernel_context_tests
 *
 * @details
 * Test Objective:
 * - To verify that the kernel architecture layer provides a mechanism to
 *   selectively disable and enable specific numeric interrupts.
 * - This routine tests the routines for disabling and enabling interrupts.
 *   These include irq_disable() and irq_enable().
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - TICK_IRQ is defined.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Do action to align to a tick boundary.
 * -# Left shift 4 bits for the value of counts.
 * -# Call irq_disable() and store its return value to imask.
 * -# Call sys_clock_tick_get_32() and store its return value to tick.
 * -# Repeat counts of calling sys_clock_tick_get_32().
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Call irq_enable() with parameter imask.
 * -# Check if tick is equal to tick2.
 * -# Repeat counts of calling sys_clock_tick_get_32().
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Check if tick is NOT equal to tick2.
 *
 * Expected Test Result:
 * - The ticks shall not increase while the timer interrupt is disabled.
 *
 * Pass/Fail Criteria:
 * - Successful if check points in test procedure are all passed, otherwise
 *   failure.
 *
 * Assumptions and Constraints:
 * - Note that this test works by disabling the timer interrupt
 *   directly, without any interaction with the timer driver or
 *   timeout subsystem.  NOT ALL ARCHITECTURES will latch and deliver
 *   a timer interrupt that arrives while the interrupt is disabled,
 *   which means that the timeout list will become corrupted (because
 *   it contains items that should have expired in the past).  Any use
 *   of kernel timeouts after completion of this test is disallowed.
 *   RUN THIS TEST LAST IN THE SUITE.
 *
 * @see irq_disable(), irq_enable()
 */
ZTEST(context_one_cpu, test_timer_interrupts)
{
#if (defined(TICK_IRQ) && defined(CONFIG_TICKLESS_KERNEL))
	/* Disable interrupts coming from the timer. */
	_test_kernel_interrupts(irq_disable_wrapper, irq_enable_wrapper, TICK_IRQ);
#else
	ztest_test_skip();
#endif
}

/**
 * @brief Test some context routines
 *
 * @details
 * Test Objective:
 * - Thread context handles derived from context switches must be able to be
 *   restored upon interrupt exit
 *
 * Testing techniques
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - N/A
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Set priority of current thread to 0 as a preemptible thread
 * -# Trap to interrupt context, get thread id of the interrupted thread and
 *  pass it back to that thread.
 * -# Return to thread context and make sure this is the interrupted context
 *  by comparing its thread ID with the thread ID passed back by the ISR.
 * -# Pass a command to the ISR to check whether the ISR is executed in
 *  interrupt context
 * -# When back in thread context, check the return value of the command.
 *
 * Expected Test Result:
 * - Thread context restored upon interrupt exit
 *
 * Pass/Fail criteria:
 * - Success if context of thread restored correctly, failure otherwise.
 *
 * Assumptions and Constraints
 * - N/A
 *
 * @ingroup kernel_context_tests
 * @see k_current_get(), k_is_in_isr()
 */
ZTEST(context, test_ctx_thread)
{
	k_tid_t self_thread_id;

	k_thread_priority_set(k_current_get(), 0);

	TC_PRINT("Testing k_current_get() from an ISR and thread\n");

	self_thread_id = k_current_get();
	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	/* isr_info is modified by the isr_handler routine */
	isr_handler_trigger();

	zassert_false(isr_info.error, "ISR detected an error");

	zassert_equal(isr_info.data, (void *)self_thread_id,
		      "ISR context ID mismatch");

	TC_PRINT("Testing k_is_in_isr() from an ISR\n");
	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();

	zassert_false(isr_info.error, "ISR detected an error");

	zassert_equal(isr_info.value, K_ISR,
		      "isr_info.value was not K_ISR");

	TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
	zassert_false(k_is_in_isr(), "Should not be in ISR context");

	zassert_false(arch_current_thread()->base.prio < 0,
		      "Current thread should have preemptible priority: %d",
		      arch_current_thread()->base.prio);

}

/**
 * @brief Test the various context/thread routines from a cooperative thread
 *
 * This routine tests the k_current_get() and k_is_in_isr() routines from both
 * a thread and an ISR (that interrupted a cooperative thread). Checking those
 * routines with preemptible threads is done elsewhere.
 *
 * @see k_current_get(), k_is_in_isr()
 */
static void _test_kernel_thread(k_tid_t _thread_id)
{
	k_tid_t self_thread_id;

	self_thread_id = k_current_get();
	zassert_true((self_thread_id != _thread_id), "thread id matches parent thread");

	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	/*
	 * Either the ISR detected an error, or the ISR context ID
	 * does not match the interrupted thread's ID.
	 */
	zassert_false((isr_info.error || (isr_info.data != (void *)self_thread_id)),
		      "Thread ID taken during ISR != calling thread");

	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	zassert_false((isr_info.error || (isr_info.value != K_ISR)),
		      "k_is_in_isr() when called from an ISR is false");

	zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true");

	zassert_false((arch_current_thread()->base.prio >= 0),
		      "thread is not a cooperative thread");
}

/**
 *
 * @brief Entry point to the thread's helper
 *
 * This routine is the entry point to the thread's helper thread. It is used to
 * help test the behavior of the k_yield() routine.
 *
 * @param arg1    unused
 * @param arg2    unused
 * @param arg3    unused
 *
 */

static void thread_helper(void *arg1, void *arg2, void *arg3)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	/*
	 * This thread starts off at a higher priority than k_yield_entry().
	 * Thus, it should execute immediately.
	 */
	thread_evidence++;

	/* Test that helper will yield to a thread of equal priority */
	self_thread_id = k_current_get();

	/* Lower priority to that of k_yield_entry() */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio + 1);

	k_yield();      /* Yield to thread of equal priority */

	thread_evidence++;
	/* thread_evidence should now be 2 */

}

/**
 * @brief Entry point to thread started by another thread
 *
 * This routine is the entry point to the thread started by the main test
 * thread in test_k_yield().
 */
static void k_yield_entry(void *arg0, void *arg1, void *arg2)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg0);
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	thread_evidence++;      /* Prove that the thread has run */
	k_sem_take(&sem_thread, K_FOREVER);

	/*
	 * Start a thread of higher priority. Note that since this thread is
	 * cooperative, it will not be preempted; the new thread will not run
	 * until this thread yields or blocks.
	 */
	self_thread_id = k_current_get();
	thread_evidence = 0;

	k_thread_create(&thread_data2, thread_stack2, THREAD_STACKSIZE,
			thread_helper, NULL, NULL, NULL,
			K_PRIO_COOP(THREAD_PRIORITY - 1), 0, K_NO_WAIT);

	zassert_equal(thread_evidence, 0,
		      "Helper created at higher priority ran prematurely.");

	/*
	 * Validate the thread is allowed to yield
	 */
	zassert_true(k_can_yield(), "Thread incorrectly detected it could not yield");

	/*
	 * Test that the thread will yield to the higher priority helper.
	 * thread_evidence is still 0.
	 */
	k_yield();

	zassert_not_equal(thread_evidence, 0,
			  "k_yield() did not yield to a higher priority thread: %d",
			  thread_evidence);

	zassert_false((thread_evidence > 1),
		      "k_yield() did not yield to an equal priority thread: %d",
		      thread_evidence);

	/*
	 * Raise the priority of k_yield_entry(). Calling k_yield() should
	 * not result in switching to the helper.
	 */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio - 1);
	k_yield();

	zassert_equal(thread_evidence, 1,
		      "k_yield() yielded to a lower priority thread");

	/*
	 * Block on sem_thread. This will allow the helper thread to
	 * complete. The main thread will wake this thread.
	 */
	k_sem_take(&sem_thread, K_FOREVER);
}

static void kernel_thread_entry(void *_thread_id, void *arg1, void *arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	thread_evidence++;      /* Prove that the thread has run */
	k_sem_take(&sem_thread, K_FOREVER);

	_test_kernel_thread((k_tid_t) _thread_id);

}

/*
 * @brief Timeout tests
 *
 * Test the k_sleep() API, as well as k_thread_create() with a delayed start.
 */
struct timeout_order {
	void *link_in_fifo;
	int32_t timeout;
	int timeout_order;
	int q_order;
};

struct timeout_order timeouts[] = {
	{ 0, 1000, 2, 0 },
	{ 0, 1500, 4, 1 },
	{ 0, 500, 0, 2 },
	{ 0, 750, 1, 3 },
	{ 0, 1750, 5, 4 },
	{ 0, 2000, 6, 5 },
	{ 0, 1250, 3, 6 },
};
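
/*
 * Each entry above is { FIFO link word, timeout in ms, expected completion
 * order, creation (queue) order }: the completion order is just the rank of
 * the timeout value, so the 500 ms entry is expected to report first.
 */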

#define NUM_TIMEOUT_THREADS ARRAY_SIZE(timeouts)
static K_THREAD_STACK_ARRAY_DEFINE(timeout_stacks, NUM_TIMEOUT_THREADS,
				   THREAD_STACKSIZE2);
static struct k_thread timeout_threads[NUM_TIMEOUT_THREADS];

/* a thread busy waits */
static void busy_wait_thread(void *mseconds, void *arg2, void *arg3)
{
	uint32_t usecs;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	usecs = POINTER_TO_INT(mseconds) * 1000;

	k_busy_wait(usecs);

	/* FIXME: Broken on Nios II, see #22956 */
#ifndef CONFIG_NIOS2
	int key = arch_irq_lock();

	k_busy_wait(usecs);
	arch_irq_unlock(key);
#endif

	/*
	 * Ideally the test should verify that the correct number of ticks
	 * have elapsed. However, when running under QEMU, the tick interrupt
	 * may be processed on a very irregular basis, meaning that far
	 * fewer than the expected number of ticks may occur for a given
	 * number of clock cycles vs. what would ordinarily be expected.
	 *
	 * Consequently, the best we can do for now to test busy waiting is
	 * to invoke the API and verify that it returns. (If it takes way
	 * too long, or never returns, the main test thread may be able to
	 * time out and report an error.)
	 */

	k_sem_give(&reply_timeout);
}

/* a thread sleeps and times out, then reports through a fifo */
static void thread_sleep(void *delta, void *arg2, void *arg3)
{
	int64_t timestamp;
	int timeout = POINTER_TO_INT(delta);

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	timestamp = k_uptime_get();
	k_msleep(timeout);
	timestamp = k_uptime_get() - timestamp;

	int slop = MAX(k_ticks_to_ms_floor64(2), 1);
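	/*
	 * Allow the measured sleep to overshoot by up to two ticks (at least
	 * 1 ms) to absorb tick rounding and scheduling latency.
	 */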

	if (timestamp < timeout || timestamp > timeout + slop) {
		TC_ERROR("timestamp out of range, got %d\n", (int)timestamp);
		return;
	}

	k_sem_give(&reply_timeout);
}

/* a thread is started with a delay, then it reports that it ran via a fifo */
static void delayed_thread(void *num, void *arg2, void *arg3)
{
	struct timeout_order *timeout = &timeouts[POINTER_TO_INT(num)];

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	TC_PRINT(" thread (q order: %d, t/o: %d) is running\n",
		 timeout->q_order, timeout->timeout);

	k_fifo_put(&timeout_order_fifo, timeout);
}

/**
 * @brief Test timeouts
 *
 * @ingroup kernel_context_tests
 *
 * @see k_busy_wait(), k_sleep()
 */
ZTEST(context_one_cpu, test_busy_wait)
{
	int32_t timeout;
	int rv;

	timeout = 20;           /* in ms */

	k_thread_create(&timeout_threads[0], timeout_stacks[0],
			THREAD_STACKSIZE2, busy_wait_thread,
			INT_TO_POINTER(timeout), NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

	rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2 * 2));

	zassert_false(rv, " *** thread timed out waiting for k_busy_wait()");
}

/**
 * @brief Test timeouts
 *
 * @ingroup kernel_context_tests
 *
 * @see k_sleep()
 */
ZTEST(context_one_cpu, test_k_sleep)
{
	struct timeout_order *data;
	int32_t timeout;
	int rv;
	int i;


	timeout = 50;

	k_thread_create(&timeout_threads[0], timeout_stacks[0],
			THREAD_STACKSIZE2, thread_sleep,
			INT_TO_POINTER(timeout), NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

	rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2));
	zassert_equal(rv, 0, " *** thread timed out waiting for thread on "
		      "k_sleep().");

	/* test k_thread_create() without cancellation */
	TC_PRINT("Testing k_thread_create() without cancellation\n");

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_thread_create(&timeout_threads[i], timeout_stacks[i],
				THREAD_STACKSIZE2,
				delayed_thread,
				INT_TO_POINTER(i), NULL, NULL,
				K_PRIO_COOP(5), 0,
				K_MSEC(timeouts[i].timeout));
	}
	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));
		zassert_not_null(data, " *** timeout while waiting for"
				 " delayed thread");

		zassert_equal(data->timeout_order, i,
			      " *** wrong delayed thread ran (got %d, "
			      "expected %d)\n", data->timeout_order, i);

		TC_PRINT(" got thread (q order: %d, t/o: %d) as expected\n",
			 data->q_order, data->timeout);
	}

	/* ensure no more threads fire */
	data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));

	zassert_false(data, " *** got something unexpected in the fifo");

	/* test k_thread_create() with cancellation */
	TC_PRINT("Testing k_thread_create() with cancellations\n");

	int cancellations[] = { 0, 3, 4, 6 };
	int num_cancellations = ARRAY_SIZE(cancellations);
	int next_cancellation = 0;

	k_tid_t delayed_threads[NUM_TIMEOUT_THREADS];

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_tid_t id;

		id = k_thread_create(&timeout_threads[i], timeout_stacks[i],
				     THREAD_STACKSIZE2, delayed_thread,
				     INT_TO_POINTER(i), NULL, NULL,
				     K_PRIO_COOP(5), 0,
				     K_MSEC(timeouts[i].timeout));

		delayed_threads[i] = id;
	}

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		int j;

		if (i == cancellations[next_cancellation]) {
			TC_PRINT(" cancelling "
				 "[q order: %d, t/o: %d, t/o order: %d]\n",
				 timeouts[i].q_order, timeouts[i].timeout, i);

			for (j = 0; j < NUM_TIMEOUT_THREADS; j++) {
				if (timeouts[j].timeout_order == i) {
					break;
				}
			}

			if (j < NUM_TIMEOUT_THREADS) {
				k_thread_abort(delayed_threads[j]);
				++next_cancellation;
				continue;
			}
		}

		data = k_fifo_get(&timeout_order_fifo, K_MSEC(2750));

		zassert_not_null(data, " *** timeout while waiting for"
				 " delayed thread");

		zassert_equal(data->timeout_order, i,
			      " *** wrong delayed thread ran (got %d, "
			      "expected %d)\n", data->timeout_order, i);

		TC_PRINT(" got (q order: %d, t/o: %d, t/o order %d) "
			 "as expected\n", data->q_order, data->timeout,
			 data->timeout_order);
	}

	zassert_equal(num_cancellations, next_cancellation,
		      " *** wrong number of cancellations (expected %d, "
		      "got %d)\n", num_cancellations, next_cancellation);

	/* ensure no more threads fire */
	data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));
	zassert_false(data, " *** got something unexpected in the fifo");

}

/**
 *
 * @brief Test the k_yield() routine
 *
 * @ingroup kernel_context_tests
 *
 * Tests the k_yield() routine. It starts another thread
 * (thus also testing k_thread_create()) and checks the behavior of
 * k_yield() against a higher priority thread,
 * a lower priority thread, and another thread of equal priority.
 *
 * @see k_yield()
 */
ZTEST(context_one_cpu, test_k_yield)
{
	thread_evidence = 0;
	k_thread_priority_set(k_current_get(), 0);

	k_sem_init(&sem_thread, 0, UINT_MAX);

	k_thread_create(&thread_data1, thread_stack1, THREAD_STACKSIZE,
			k_yield_entry, NULL, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

	zassert_equal(thread_evidence, 1,
		      "Thread did not execute as expected!: %d", thread_evidence);

	k_sem_give(&sem_thread);
	k_sem_give(&sem_thread);
	k_sem_give(&sem_thread);
}

/**
 * @brief Test kernel thread creation
 *
 * @ingroup kernel_context_tests
 *
 * @see k_thread_create
 */

ZTEST(context_one_cpu, test_thread)
{

	k_thread_create(&thread_data3, thread_stack3, THREAD_STACKSIZE,
			kernel_thread_entry, NULL, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

}

static void *context_setup(void)
{
	kernel_init_objects();

	return NULL;
}

ZTEST_SUITE(context_cpu_idle, NULL, context_setup, NULL, NULL, NULL);

ZTEST_SUITE(context, NULL, context_setup, NULL, NULL, NULL);

ZTEST_SUITE(context_one_cpu, NULL, context_setup,
		ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);