/*
 * Copyright (c) 2023 Syntacore. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "zephyr/ztest_test.h"
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/spinlock.h>

#ifdef CONFIG_SCHED_CPU_MASK

#define STACK_SIZE	(8 * 1024)
#define CORES_NUM	CONFIG_MP_MAX_NUM_CPUS
#define FAIRNESS_TEST_CYCLES_PER_CORE 1000

BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS > 1);

static K_THREAD_STACK_ARRAY_DEFINE(tstack, CORES_NUM, STACK_SIZE);
static struct k_thread tthread[CORES_NUM];
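/* Per-CPU counters of successful spinlock acquisitions */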
static uint32_t spinlock_grabbed[CORES_NUM];
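/* Total number of contention cycles left to run, shared by all cores */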
static atomic_t fairness_test_cycles;
static struct k_spinlock lock;
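/* Countdown barrier used to release all threads into the contention loop together */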
static atomic_t start_sync;

static inline struct k_thread *get_thread(uint8_t core_id)
{
	return &tthread[core_id];
}

/**
 * @brief Execution thread which runs concurrently on each CPU in the system
 *
 * @param [in] arg1 - ID of the CPU core the thread is pinned to
 * @param [in] arg2 - unused
 * @param [in] arg3 - unused
 */
static void test_thread(void *arg1, void *arg2, void *arg3)
{
	int core_id = (uintptr_t)arg1;

	/* Synchronize all the cores as much as possible */
	int key = arch_irq_lock();

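	/*
	 * Each thread decrements the start counter and spins until all cores
	 * have arrived. Interrupts remain locked on this CPU until the
	 * contention loop below completes.
	 */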
	atomic_dec(&start_sync);
	while (atomic_get(&start_sync) != 0)
		;

	/*
	 * Run the test: let the cores contend for the spinlock and
	 * collect the spinlock acquisition statistics
	 */
	do {
		k_spinlock_key_t spinlock_key = k_spin_lock(&lock);

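		/*
		 * If the cycle budget is already exhausted, another core consumed
		 * the last cycle while this one was waiting for the lock: release
		 * everything and finish the thread.
		 */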
		if (atomic_get(&fairness_test_cycles) == 0) {
			k_spin_unlock(&lock, spinlock_key);
			arch_irq_unlock(key);
			return;
		}

		spinlock_grabbed[core_id]++;

		/* Imitate some work which takes time */
		volatile uint32_t countdown = 10000;

		while (countdown--)
			;

		atomic_dec(&fairness_test_cycles);

		k_spin_unlock(&lock, spinlock_key);
	} while (atomic_get(&fairness_test_cycles) != 0);

	arch_irq_unlock(key);
}

static void test_init(void)
{
	memset(tthread, 0x00, sizeof(tthread));
	memset(tstack, 0x00, sizeof(tstack));
	atomic_set(&start_sync, CORES_NUM);
	atomic_set(&fairness_test_cycles, FAIRNESS_TEST_CYCLES_PER_CORE * CORES_NUM);

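	/* Create one thread per CPU core; K_FOREVER keeps it dormant until k_thread_start() */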
	for (uintptr_t core_id = 0; core_id < CORES_NUM; core_id++) {
		struct k_thread *thread = get_thread(core_id);

		k_thread_create(thread, tstack[core_id], STACK_SIZE,
				test_thread, (void *)core_id, NULL, NULL,
				K_PRIO_COOP(10), 0, K_FOREVER);

		/*
		 * Pin each thread to a particular CPU core.
		 * The larger a core's memory access latency is compared to the
		 * other cores, the lower its chance of winning the contention
		 * for the spinlock if the spinlock implementation does not
		 * provide acquisition fairness.
		 */
		k_thread_cpu_pin(thread, core_id);
	}
}

/**
 * @brief Test spinlock acquisition fairness
 *
 * @details This test verifies spinlock acquisition fairness among the cores
 *		contending for the spinlock. Memory access latency may vary between
 *		the CPU cores, so some CPUs reach the spinlock faster than others
 *		and, depending on the spinlock implementation, may have a higher
 *		chance of winning the contention, starving the remaining cores.
 *		This effect can become critical on some real-life platforms
 *		(e.g. NUMA), resulting in performance loss or even a live-lock,
 *		where a single CPU continuously wins the contention.
 *		This test ensures that the probability of winning the contention
 *		for the spinlock is evenly distributed between all of the
 *		contending cores.
 *
 * @ingroup kernel_spinlock_tests
 *
 * @see k_spin_lock(), k_spin_unlock()
 */
ZTEST(spinlock, test_spinlock_fairness)
{
	test_init();

	/* Launching all the threads */
	for (uint8_t core_id = 0; core_id < CORES_NUM; core_id++) {
		struct k_thread *thread = get_thread(core_id);

		k_thread_start(thread);
	}

	/* Waiting for all the threads to complete */
	for (uint8_t core_id = 0; core_id < CORES_NUM; core_id++) {
		struct k_thread *thread = get_thread(core_id);

		k_thread_join(thread, K_FOREVER);
	}

	/* Print statistics */
	for (uint8_t core_id = 0; core_id < CORES_NUM; core_id++) {
		printk("CPU%u acquired spinlock %u times, expected %u\n",
			core_id, spinlock_grabbed[core_id], FAIRNESS_TEST_CYCLES_PER_CORE);
	}

	/* Verify spinlock acquisition fairness */
	for (uint8_t core_id = 0; core_id < CORES_NUM; core_id++) {
		zassert_false(spinlock_grabbed[core_id] < FAIRNESS_TEST_CYCLES_PER_CORE,
			"CPU%d starved on a spinlock: acquired %u times, expected %u\n",
			core_id, spinlock_grabbed[core_id], FAIRNESS_TEST_CYCLES_PER_CORE);
	}
}

#endif /* CONFIG_SCHED_CPU_MASK */