/*
 * Copyright (c) 2012-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * @brief test context and thread APIs
 *
 * @defgroup kernel_context_tests Context Tests
 *
 * @ingroup all_tests
 *
 * This module tests the following CPU and thread related routines:
 * k_thread_create(), k_yield(), k_is_in_isr(),
 * k_current_get(), k_cpu_idle(), k_cpu_atomic_idle(),
 * irq_lock(), irq_unlock(),
 * irq_offload(), irq_enable(), irq_disable()
 * @{
 * @}
 */

#include <stdlib.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys_clock.h>

#if defined(CONFIG_SOC_POSIX)
/* TIMER_TICK_IRQ is provided by the <soc.h> header on certain platforms */
#include <soc.h>
#endif

#define THREAD_STACKSIZE    (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define THREAD_STACKSIZE2   (384 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define THREAD_PRIORITY     4

#define THREAD_SELF_CMD    0
#define EXEC_CTX_TYPE_CMD  1

#define UNKNOWN_COMMAND    -1
#define INVALID_BEHAVIOUR  -2

/*
 * Get the timer type dependent IRQ number. If the timer type is
 * not defined for the platform, generate an error.
 */

#if defined(CONFIG_APIC_TSC_DEADLINE_TIMER)
#define TICK_IRQ z_loapic_irq_base() /* first LVT interrupt */
#elif defined(CONFIG_CPU_CORTEX_M)
/*
 * Cortex-M SoCs use the SYSTICK exception for the system timer, which is
 * not considered an IRQ by the irq_enable()/irq_disable() APIs.
 */
#elif defined(CONFIG_SPARC)
#elif defined(CONFIG_MIPS)
#elif defined(CONFIG_ARCH_POSIX)
#if defined(CONFIG_BOARD_NATIVE_POSIX)
#define TICK_IRQ TIMER_TICK_IRQ
#else
/*
 * Other POSIX arch boards will skip the irq_disable() and irq_enable() test
 * unless TICK_IRQ is defined here for them
 */
#endif /* CONFIG_BOARD_NATIVE_POSIX */
#else

extern const int32_t z_sys_timer_irq_for_test;
#define TICK_IRQ (z_sys_timer_irq_for_test)

#endif

/* Cortex-M1, Nios II, and RISC-V without CONFIG_RISCV_HAS_CPU_IDLE
 * do not have a power saving instruction, so k_cpu_idle() returns immediately
 */
#if !defined(CONFIG_CPU_CORTEX_M1) && !defined(CONFIG_NIOS2) && \
	(!defined(CONFIG_RISCV) || defined(CONFIG_RISCV_HAS_CPU_IDLE))
#define HAS_POWERSAVE_INSTRUCTION
#endif

typedef struct {
	int command;            /* command to process   */
	int error;              /* error value (if any) */
	union {
		void *data;     /* pointer to data to use or return */
		int value;      /* value to be passed or returned   */
	};
} ISR_INFO;

typedef int (*disable_int_func) (int);
typedef void (*enable_int_func) (int);

static struct k_sem sem_thread;
static struct k_timer timer;
static struct k_sem reply_timeout;
struct k_fifo timeout_order_fifo;

static int thread_evidence;

static K_THREAD_STACK_DEFINE(thread_stack1, THREAD_STACKSIZE);
static K_THREAD_STACK_DEFINE(thread_stack2, THREAD_STACKSIZE);
static K_THREAD_STACK_DEFINE(thread_stack3, THREAD_STACKSIZE);
static struct k_thread thread_data1;
static struct k_thread thread_data2;
static struct k_thread thread_data3;

static ISR_INFO isr_info;

/**
 * @brief Handler to perform various actions from within an ISR context
 *
 * This routine is the ISR handler for isr_handler_trigger(). It performs
 * the command requested in <isr_info.command>.
 */
static void isr_handler(const void *data)
{
	ARG_UNUSED(data);

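	/* Code running in ISR context must never be allowed to yield */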
	if (k_can_yield()) {
		isr_info.error = INVALID_BEHAVIOUR;
	}

	switch (isr_info.command) {
	case THREAD_SELF_CMD:
		isr_info.data = (void *)k_current_get();
		break;

	case EXEC_CTX_TYPE_CMD:
		if (k_is_in_isr()) {
			isr_info.value = K_ISR;
			break;
		}

		if (_current->base.prio < 0) {
			isr_info.value = K_COOP_THREAD;
			break;
		}

		isr_info.value = K_PREEMPT_THREAD;

		break;

	default:
		isr_info.error = UNKNOWN_COMMAND;
		break;
	}
}

static void isr_handler_trigger(void)
{
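	/* Run isr_handler() synchronously in interrupt context */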
	irq_offload(isr_handler, NULL);
}

/**
 *
 * @brief Initialize kernel objects
 *
 * This routine initializes the kernel objects used in this module's tests.
 *
 */
static void kernel_init_objects(void)
{
	k_sem_init(&reply_timeout, 0, UINT_MAX);
	k_timer_init(&timer, NULL, NULL);
	k_fifo_init(&timeout_order_fifo);
}

/**
 * @brief A wrapper for irq_lock()
 *
 * @return irq_lock() return value
 */
int irq_lock_wrapper(int unused)
{
	ARG_UNUSED(unused);

	return irq_lock();
}

/**
 * @brief A wrapper for irq_unlock()
 */
void irq_unlock_wrapper(int imask)
{
	irq_unlock(imask);
}

/**
 * @brief A wrapper for irq_disable()
 *
 * @return @a irq
 */
int irq_disable_wrapper(int irq)
{
	irq_disable(irq);
	return irq;
}

/**
 * @brief A wrapper for irq_enable()
 */
void irq_enable_wrapper(int irq)
{
	irq_enable(irq);
}

#if defined(HAS_POWERSAVE_INSTRUCTION)
#if defined(CONFIG_TICKLESS_KERNEL)
static struct k_timer idle_timer;

static void idle_timer_expiry_function(struct k_timer *timer_id)
{
	k_timer_stop(&idle_timer);
}

static void _test_kernel_cpu_idle(int atomic)
{
	uint64_t t0, dt;
	unsigned int i, key;
	uint32_t dur = k_ms_to_ticks_ceil32(10);
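	/* Tolerate one tick of alignment error plus up to 1 ms of timer slop */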
	uint32_t slop = 1 + k_ms_to_ticks_ceil32(1);

	/* Set up a timer to trigger events to exit idle mode */
	k_timer_init(&idle_timer, idle_timer_expiry_function, NULL);

	for (i = 0; i < 5; i++) {
		k_usleep(1);
		t0 = k_uptime_ticks();
		k_timer_start(&idle_timer, K_TICKS(dur), K_NO_WAIT);
		key = irq_lock();
		if (atomic) {
			k_cpu_atomic_idle(key);
		} else {
			k_cpu_idle();
		}
		dt = k_uptime_ticks() - t0;
		zassert_true(abs((int32_t)(dt - dur)) <= slop,
			     "Inaccurate wakeup, idled for %u ticks, expected %u",
			     (uint32_t)dt, dur);
	}
}

#else /* CONFIG_TICKLESS_KERNEL */
static void _test_kernel_cpu_idle(int atomic)
{
	int tms, tms2;
	int i;

	/* Align to a "ms boundary". */
	tms = k_uptime_get_32();
	while (tms == k_uptime_get_32()) {
		Z_SPIN_DELAY(50);
	}

	tms = k_uptime_get_32();
	for (i = 0; i < 5; i++) { /* Repeat the test five times */
		if (atomic) {
			unsigned int key = irq_lock();

			k_cpu_atomic_idle(key);
		} else {
			k_cpu_idle();
		}
		/* Add the expected milliseconds per tick */
		tms += k_ticks_to_ms_floor64(1);
		tms2 = k_uptime_get_32();
		zassert_false(tms2 < tms, "Bad ms per tick value computed, "
			      "got %d which is less than %d\n",
			      tms2, tms);
	}
}
#endif /* CONFIG_TICKLESS_KERNEL */

/**
 * @brief Test cpu idle function
 *
 * @details
 * Test Objective:
 * - The kernel architecture provides an idle function to be run when the
 *   system has no work for the current CPU
 * - This routine tests the k_cpu_atomic_idle() routine
 *
 * Testing techniques:
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - HAS_POWERSAVE_INSTRUCTION is set
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Record system time before cpu enters idle state
 * -# Enter cpu idle state by k_cpu_atomic_idle()
 * -# Record system time after cpu idle state is interrupted
 * -# Compare the two system time values.
 *
 * Expected Test Result:
 * - cpu enters idle state for a given time
 *
 * Pass/Fail criteria:
 * - Success if the cpu enters idle state, failure otherwise.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see k_cpu_atomic_idle()
 * @ingroup kernel_context_tests
 */
ZTEST(context_cpu_idle, test_cpu_idle_atomic)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	ztest_test_skip();
#else
	_test_kernel_cpu_idle(1);
#endif
}

/**
 * @brief Test cpu idle function
 *
 * @details
 * Test Objective:
 * - The kernel architecture provides an idle function to be run when the
 *   system has no work for the current CPU
 * - This routine tests the k_cpu_idle() routine
 *
 * Testing techniques:
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - HAS_POWERSAVE_INSTRUCTION is set
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Record system time before cpu enters idle state
 * -# Enter cpu idle state by k_cpu_idle()
 * -# Record system time after cpu idle state is interrupted
 * -# Compare the two system time values.
 *
 * Expected Test Result:
 * - cpu enters idle state for a given time
 *
 * Pass/Fail criteria:
 * - Success if the cpu enters idle state, failure otherwise.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see k_cpu_idle()
 * @ingroup kernel_context_tests
 */
ZTEST(context_cpu_idle, test_cpu_idle)
{
/*
 * FIXME: remove the skip code when the sleep instruction in
 * nsim_hs_smp is fixed.
 */
#if defined(CONFIG_SOC_NSIM) && defined(CONFIG_SMP)
	ztest_test_skip();
#endif
	_test_kernel_cpu_idle(0);
}

#else /* HAS_POWERSAVE_INSTRUCTION */
ZTEST(context_cpu_idle, test_cpu_idle)
{
	ztest_test_skip();
}
ZTEST(context_cpu_idle, test_cpu_idle_atomic)
{
	ztest_test_skip();
}
#endif

static void _test_kernel_interrupts(disable_int_func disable_int,
				    enable_int_func enable_int, int irq)
{
	unsigned long long count = 1ull;
	unsigned long long i = 0;
	int tick;
	int tick2;
	int imask;

	/* Align to a "tick boundary" */
	tick = sys_clock_tick_get_32();
	while (sys_clock_tick_get_32() == tick) {
		Z_SPIN_DELAY(1000);
	}

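	/* Count the number of delay loops executed during one full tick */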
	tick++;
	while (sys_clock_tick_get_32() == tick) {
		Z_SPIN_DELAY(1000);
		count++;
	}

	/*
	 * Inflate <count> so that when we loop later, many ticks should have
	 * elapsed during the loop. This later loop will not exactly match the
	 * previous loop, but it should be close enough in structure that when
	 * combined with the inflated count, many ticks will have passed.
	 */

	count <<= 4;

	imask = disable_int(irq);
	tick = sys_clock_tick_get_32();
	for (i = 0; i < count; i++) {
		sys_clock_tick_get_32();
		Z_SPIN_DELAY(1000);
	}

	tick2 = sys_clock_tick_get_32();

	/*
	 * Re-enable interrupts before returning (for both success and failure
	 * cases).
	 */
	enable_int(imask);

	/* In TICKLESS, current time is retrieved from a hardware
	 * counter and ticks DO advance with interrupts locked!
	 */
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		zassert_equal(tick2, tick,
			      "tick advanced with interrupts locked");
	}

	/* Now repeat with interrupts unlocked. */
	for (i = 0; i < count; i++) {
		sys_clock_tick_get_32();
		Z_SPIN_DELAY(1000);
	}

	tick2 = sys_clock_tick_get_32();
	zassert_not_equal(tick, tick2,
			  "tick didn't advance as expected");
}

/**
 * @brief Test routines for disabling and enabling interrupts
 *
 * @ingroup kernel_context_tests
 *
 * @details
 * Test Objective:
 * - To verify that the kernel architecture layer provides a mechanism to
 *   simultaneously mask all local CPU interrupts and return the previous mask
 *   state for restoration.
 * - This routine tests the routines for disabling and enabling interrupts.
 *   These include irq_lock() and irq_unlock().
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_TICKLESS_KERNEL is not set.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Busy-wait to align to a tick boundary.
 * -# Left shift the measured count by 4 bits.
 * -# Call irq_lock() and store its return value to imask.
 * -# Call sys_clock_tick_get_32() and store its return value to tick.
 * -# Call sys_clock_tick_get_32() <count> times in a loop.
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Call irq_unlock() with parameter imask.
 * -# Check if tick is equal to tick2.
 * -# Call sys_clock_tick_get_32() <count> times in a loop.
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Check if tick is NOT equal to tick2.
 *
 * Expected Test Result:
 * - The ticks shall not increase while interrupts are locked.
 *
 * Pass/Fail Criteria:
 * - Successful if check points in test procedure are all passed, otherwise
 *   failure.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see irq_lock(), irq_unlock()
 */
ZTEST(context, test_interrupts)
{
	/* IRQ locks don't prevent ticks from advancing in tickless mode */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		ztest_test_skip();
	}

	_test_kernel_interrupts(irq_lock_wrapper, irq_unlock_wrapper, -1);
}

/**
 * @brief Test routines for disabling and enabling interrupts (disable timer)
 *
 * @ingroup kernel_context_tests
 *
 * @details
 * Test Objective:
 * - To verify that the kernel architecture layer provides a mechanism to
 *   selectively disable and enable specific numeric interrupts.
 * - This routine tests the routines for disabling and enabling interrupts.
 *   These include irq_disable() and irq_enable().
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - TICK_IRQ is defined.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Busy-wait to align to a tick boundary.
 * -# Left shift the measured count by 4 bits.
 * -# Call irq_disable() and store its return value to imask.
 * -# Call sys_clock_tick_get_32() and store its return value to tick.
 * -# Call sys_clock_tick_get_32() <count> times in a loop.
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Call irq_enable() with parameter imask.
 * -# Check if tick is equal to tick2.
 * -# Call sys_clock_tick_get_32() <count> times in a loop.
 * -# Call sys_clock_tick_get_32() and store its return value to tick2.
 * -# Check if tick is NOT equal to tick2.
 *
 * Expected Test Result:
 * - The ticks shall not increase while the timer interrupt is disabled.
 *
 * Pass/Fail Criteria:
 * - Successful if check points in test procedure are all passed, otherwise
 *   failure.
 *
 * Assumptions and Constraints:
 * - Note that this test works by disabling the timer interrupt
 *   directly, without any interaction with the timer driver or
 *   timeout subsystem.  NOT ALL ARCHITECTURES will latch and deliver
 *   a timer interrupt that arrives while the interrupt is disabled,
 *   which means that the timeout list will become corrupted (because
 *   it contains items that should have expired in the past).  Any use
 *   of kernel timeouts after completion of this test is disallowed.
 *   RUN THIS TEST LAST IN THE SUITE.
 *
 * @see irq_disable(), irq_enable()
 */
ZTEST(context_one_cpu, test_timer_interrupts)
{
#if (defined(TICK_IRQ) && defined(CONFIG_TICKLESS_KERNEL))
	/* Disable interrupts coming from the timer. */
	_test_kernel_interrupts(irq_disable_wrapper, irq_enable_wrapper, TICK_IRQ);
#else
	ztest_test_skip();
#endif
}

/**
 * @brief Test some context routines
 *
 * @details
 * Test Objective:
 * - Thread context handles derived from context switches must be able to be
 *   restored upon interrupt exit
 *
 * Testing techniques:
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Condition:
 * - N/A
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Set the priority of the current thread to 0 (preemptible)
 * -# Trap to interrupt context, get the thread ID of the interrupted thread,
 *  and pass it back to that thread.
 * -# Return to thread context and verify the correct context was interrupted
 *  by comparing its thread ID with the thread ID passed back by the ISR.
 * -# Pass a command to the ISR to check whether the ISR executes in
 *  interrupt context
 * -# On return to thread context, check the result of the command.
 *
 * Expected Test Result:
 * - Thread context restored upon interrupt exit
 *
 * Pass/Fail criteria:
 * - Success if the context of the thread is restored correctly, failure
 *   otherwise.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @ingroup kernel_context_tests
 * @see k_current_get(), k_is_in_isr()
 */
ZTEST(context, test_ctx_thread)
{
	k_tid_t self_thread_id;

	k_thread_priority_set(k_current_get(), 0);

	TC_PRINT("Testing k_current_get() from an ISR and thread\n");

	self_thread_id = k_current_get();
	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	/* isr_info is modified by the isr_handler routine */
	isr_handler_trigger();

	zassert_false(isr_info.error, "ISR detected an error");

	zassert_equal(isr_info.data, (void *)self_thread_id,
		      "ISR context ID mismatch");

	TC_PRINT("Testing k_is_in_isr() from an ISR\n");
	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();

	zassert_false(isr_info.error, "ISR detected an error");

	zassert_equal(isr_info.value, K_ISR,
		      "isr_info.value was not K_ISR");

	TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
	zassert_false(k_is_in_isr(), "Should not be in ISR context");

	zassert_false(_current->base.prio < 0,
		      "Current thread should have preemptible priority: %d",
		      _current->base.prio);
}

/**
 * @brief Test the various context/thread routines from a cooperative thread
 *
 * This routine tests the k_current_get() and k_is_in_isr() routines from both
 * a thread and an ISR (that interrupted a cooperative thread). Checking those
 * routines with preemptible threads is done elsewhere.
 *
 * @see k_current_get(), k_is_in_isr()
 */
static void _test_kernel_thread(k_tid_t _thread_id)
{
	k_tid_t self_thread_id;

	self_thread_id = k_current_get();
	zassert_true((self_thread_id != _thread_id), "thread id matches parent thread");

	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	/*
	 * Either the ISR detected an error, or the ISR context ID
	 * does not match the interrupted thread's ID.
	 */
	zassert_false((isr_info.error || (isr_info.data != (void *)self_thread_id)),
		      "Thread ID taken during ISR != calling thread");

	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	zassert_false((isr_info.error || (isr_info.value != K_ISR)),
		      "k_is_in_isr() when called from an ISR is false");

	zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true");

	zassert_false((_current->base.prio >= 0),
		      "thread is not a cooperative thread");
}

/**
 *
 * @brief Entry point to the thread's helper
 *
 * This routine is the entry point to the thread's helper thread. It is used to
 * help test the behavior of the k_yield() routine.
 *
 * @param arg1    unused
 * @param arg2    unused
 * @param arg3    unused
 *
 */

static void thread_helper(void *arg1, void *arg2, void *arg3)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	/*
	 * This thread starts off at a higher priority than k_yield_entry().
	 * Thus, it should execute immediately.
	 */
	thread_evidence++;

	/* Test that helper will yield to a thread of equal priority */
	self_thread_id = k_current_get();

	/* Lower priority to that of k_yield_entry() */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio + 1);

	k_yield();      /* Yield to thread of equal priority */

	thread_evidence++;
	/* thread_evidence should now be 2 */
}

/**
 * @brief Entry point to thread started by another thread
 *
 * This routine is the entry point to the thread started by test_k_yield().
 */
static void k_yield_entry(void *arg0, void *arg1, void *arg2)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg0);
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	thread_evidence++;      /* Prove that the thread has run */
	k_sem_take(&sem_thread, K_FOREVER);

	/*
	 * Start a thread of higher priority. Since this thread is cooperative,
	 * it will not automatically be preempted by the higher priority helper
	 * until it yields or blocks.
	 */
	self_thread_id = k_current_get();
	thread_evidence = 0;

	k_thread_create(&thread_data2, thread_stack2, THREAD_STACKSIZE,
			thread_helper, NULL, NULL, NULL,
			K_PRIO_COOP(THREAD_PRIORITY - 1), 0, K_NO_WAIT);

	zassert_equal(thread_evidence, 0,
		      "Helper created at higher priority ran prematurely.");

	/*
	 * Validate that the thread is allowed to yield
	 */
	zassert_true(k_can_yield(), "Thread incorrectly detected it could not yield");

	/*
	 * Test that the thread will yield to the higher priority helper.
	 * thread_evidence is still 0.
	 */
	k_yield();

	zassert_not_equal(thread_evidence, 0,
			  "k_yield() did not yield to a higher priority thread: %d",
			  thread_evidence);

	zassert_false((thread_evidence > 1),
		      "k_yield() did not yield to an equal priority thread: %d",
		      thread_evidence);

	/*
	 * Raise this thread's priority. Calling k_yield() should
	 * not result in switching to the helper.
	 */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio - 1);
	k_yield();

	zassert_equal(thread_evidence, 1,
		      "k_yield() yielded to a lower priority thread");

	/*
	 * Block on sem_thread. This will allow the helper thread to
	 * complete. The main thread will wake this thread.
	 */
	k_sem_take(&sem_thread, K_FOREVER);
}

static void kernel_thread_entry(void *_thread_id, void *arg1, void *arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	thread_evidence++;      /* Prove that the thread has run */
	k_sem_take(&sem_thread, K_FOREVER);

	_test_kernel_thread((k_tid_t) _thread_id);
}

/*
 * @brief Timeout tests
 *
 * Test the k_sleep() API, as well as k_thread_create() with a delayed start.
 */
struct timeout_order {
	void *link_in_fifo;
	int32_t timeout;
	int timeout_order;
	int q_order;
};

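/* Fields: FIFO link word, delay in ms, expected expiry order, creation (q) order */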
struct timeout_order timeouts[] = {
	{ 0, 1000, 2, 0 },
	{ 0, 1500, 4, 1 },
	{ 0, 500, 0, 2 },
	{ 0, 750, 1, 3 },
	{ 0, 1750, 5, 4 },
	{ 0, 2000, 6, 5 },
	{ 0, 1250, 3, 6 },
};

#define NUM_TIMEOUT_THREADS ARRAY_SIZE(timeouts)
static K_THREAD_STACK_ARRAY_DEFINE(timeout_stacks, NUM_TIMEOUT_THREADS,
				   THREAD_STACKSIZE2);
static struct k_thread timeout_threads[NUM_TIMEOUT_THREADS];

/* a thread busy waits */
static void busy_wait_thread(void *mseconds, void *arg2, void *arg3)
{
	uint32_t usecs;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	usecs = POINTER_TO_INT(mseconds) * 1000;

	k_busy_wait(usecs);

	/* FIXME: Broken on Nios II, see #22956 */
#ifndef CONFIG_NIOS2
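	/* k_busy_wait() must also complete with interrupts locked */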
	int key = arch_irq_lock();

	k_busy_wait(usecs);
	arch_irq_unlock(key);
#endif

	/*
	 * Ideally the test should verify that the correct number of ticks
	 * have elapsed. However, when running under QEMU, the tick interrupt
	 * may be processed on a very irregular basis, meaning that far
	 * fewer than the expected number of ticks may occur for a given
	 * number of clock cycles vs. what would ordinarily be expected.
	 *
	 * Consequently, the best we can do for now to test busy waiting is
	 * to invoke the API and verify that it returns. (If it takes way
	 * too long, or never returns, the main test thread may be able to
	 * time out and report an error.)
	 */

	k_sem_give(&reply_timeout);
}

/* a thread sleeps and times out, then reports via a semaphore */
static void thread_sleep(void *delta, void *arg2, void *arg3)
{
	int64_t timestamp;
	int timeout = POINTER_TO_INT(delta);

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	timestamp = k_uptime_get();
	k_msleep(timeout);
	timestamp = k_uptime_get() - timestamp;

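	/* Allow up to two ticks (at least 1 ms) of oversleep from tick alignment */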
	int slop = MAX(k_ticks_to_ms_floor64(2), 1);

	if (timestamp < timeout || timestamp > timeout + slop) {
		TC_ERROR("timestamp out of range, got %d\n", (int)timestamp);
		return;
	}

	k_sem_give(&reply_timeout);
}

/* a thread is started with a delay, then it reports that it ran via a fifo */
static void delayed_thread(void *num, void *arg2, void *arg3)
{
	struct timeout_order *timeout = &timeouts[POINTER_TO_INT(num)];

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	TC_PRINT(" thread (q order: %d, t/o: %d) is running\n",
		 timeout->q_order, timeout->timeout);

	k_fifo_put(&timeout_order_fifo, timeout);
}

/**
 * @brief Test k_busy_wait()
 *
 * @ingroup kernel_context_tests
 *
 * @see k_busy_wait(), k_sleep()
 */
ZTEST(context_one_cpu, test_busy_wait)
{
	int32_t timeout;
	int rv;

	timeout = 20;           /* in ms */

	k_thread_create(&timeout_threads[0], timeout_stacks[0],
			THREAD_STACKSIZE2, busy_wait_thread,
			INT_TO_POINTER(timeout), NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

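	/* The thread busy-waits twice for ~timeout ms each, so allow a 4x margin */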
	rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2 * 2));

	zassert_false(rv, " *** thread timed out waiting for k_busy_wait()");
}

/**
 * @brief Test timeouts
 *
 * @ingroup kernel_context_tests
 *
 * @see k_sleep()
 */
ZTEST(context_one_cpu, test_k_sleep)
{
	struct timeout_order *data;
	int32_t timeout;
	int rv;
	int i;

	timeout = 50;

	k_thread_create(&timeout_threads[0], timeout_stacks[0],
			THREAD_STACKSIZE2, thread_sleep,
			INT_TO_POINTER(timeout), NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

	rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2));
	zassert_equal(rv, 0, " *** thread timed out waiting for thread on "
		      "k_sleep().");

	/* test k_thread_create() without cancellation */
	TC_PRINT("Testing k_thread_create() without cancellation\n");

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_thread_create(&timeout_threads[i], timeout_stacks[i],
				THREAD_STACKSIZE2,
				delayed_thread,
				INT_TO_POINTER(i), NULL, NULL,
				K_PRIO_COOP(5), 0,
				K_MSEC(timeouts[i].timeout));
	}
	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));
		zassert_not_null(data, " *** timeout while waiting for"
				 " delayed thread");

		zassert_equal(data->timeout_order, i,
			      " *** wrong delayed thread ran (got %d, "
			      "expected %d)\n", data->timeout_order, i);

		TC_PRINT(" got thread (q order: %d, t/o: %d) as expected\n",
			 data->q_order, data->timeout);
	}

	/* ensure no more threads fire */
	data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));

	zassert_false(data, " *** got something unexpected in the fifo");

	/* test k_thread_create() with cancellation */
	TC_PRINT("Testing k_thread_create() with cancellations\n");

	int cancellations[] = { 0, 3, 4, 6 };
	int num_cancellations = ARRAY_SIZE(cancellations);
	int next_cancellation = 0;

	k_tid_t delayed_threads[NUM_TIMEOUT_THREADS];

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		k_tid_t id;

		id = k_thread_create(&timeout_threads[i], timeout_stacks[i],
				     THREAD_STACKSIZE2, delayed_thread,
				     INT_TO_POINTER(i), NULL, NULL,
				     K_PRIO_COOP(5), 0,
				     K_MSEC(timeouts[i].timeout));

		delayed_threads[i] = id;
	}

	for (i = 0; i < NUM_TIMEOUT_THREADS; i++) {
		int j;

		if (i == cancellations[next_cancellation]) {
			TC_PRINT(" cancelling "
				 "[q order: %d, t/o: %d, t/o order: %d]\n",
				 timeouts[i].q_order, timeouts[i].timeout, i);

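			/* Find the delayed thread whose expiry order matches i and abort it */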
			for (j = 0; j < NUM_TIMEOUT_THREADS; j++) {
				if (timeouts[j].timeout_order == i) {
					break;
				}
			}

			if (j < NUM_TIMEOUT_THREADS) {
				k_thread_abort(delayed_threads[j]);
				++next_cancellation;
				continue;
			}
		}

		data = k_fifo_get(&timeout_order_fifo, K_MSEC(2750));

		zassert_not_null(data, " *** timeout while waiting for"
				 " delayed thread");

		zassert_equal(data->timeout_order, i,
			      " *** wrong delayed thread ran (got %d, "
			      "expected %d)\n", data->timeout_order, i);

		TC_PRINT(" got (q order: %d, t/o: %d, t/o order %d) "
			 "as expected\n", data->q_order, data->timeout,
			 data->timeout_order);
	}

	zassert_equal(num_cancellations, next_cancellation,
		      " *** wrong number of cancellations (expected %d, "
		      "got %d)\n", num_cancellations, next_cancellation);

	/* ensure no more threads fire */
	data = k_fifo_get(&timeout_order_fifo, K_MSEC(750));
	zassert_false(data, " *** got something unexpected in the fifo");
}

/**
 *
 * @brief Test the k_yield() routine
 *
 * @ingroup kernel_context_tests
 *
 * Tests the k_yield() routine. It starts another thread
 * (thus also testing k_thread_create()) and checks the behavior of
 * k_yield() against a higher priority thread,
 * a lower priority thread, and another thread of equal priority.
 *
 * @see k_yield()
 */
ZTEST(context_one_cpu, test_k_yield)
{
	thread_evidence = 0;
	k_thread_priority_set(k_current_get(), 0);

	k_sem_init(&sem_thread, 0, UINT_MAX);

	k_thread_create(&thread_data1, thread_stack1, THREAD_STACKSIZE,
			k_yield_entry, NULL, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);

	zassert_equal(thread_evidence, 1,
		      "Thread did not execute as expected!: %d", thread_evidence);

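	/*
	 * Release k_yield_entry() from both of its k_sem_take() calls; the
	 * extra count is left for the thread spawned later by test_thread.
	 */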
	k_sem_give(&sem_thread);
	k_sem_give(&sem_thread);
	k_sem_give(&sem_thread);
}

/**
 * @brief Test kernel thread creation
 *
 * @ingroup kernel_context_tests
 *
 * @see k_thread_create()
 */
ZTEST(context_one_cpu, test_thread)
{
	k_thread_create(&thread_data3, thread_stack3, THREAD_STACKSIZE,
			kernel_thread_entry, NULL, NULL,
			NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, K_NO_WAIT);
}

static void *context_setup(void)
{
	kernel_init_objects();

	return NULL;
}

ZTEST_SUITE(context_cpu_idle, NULL, context_setup, NULL, NULL, NULL);

ZTEST_SUITE(context, NULL, context_setup, NULL, NULL, NULL);

ZTEST_SUITE(context_one_cpu, NULL, context_setup,
		ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);