/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/smp.h>
#include <zephyr/ztest.h>
#include <zephyr/cache.h>

#include <intel_adsp_ipc.h>
#include "tests.h"

#define RUN_ON_STACKSZ 2048
#define HAR_STACKSZ    1024
#define HAR_PRIORITY   7

/* Utility for spin-polled loops.  Avoids spamming shared resources
 * like SRAM or MMIO registers
 */
static ALWAYS_INLINE void delay_relax(void)
{
	for (volatile int j = 0; j < 1000; j++) {
	}
}

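/* Trampoline used by run_on_cpu(): runs the requested callback and
 * then raises the caller's completion flag.
 */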
static void run_on_cpu_threadfn(void *a, void *b, void *c)
{
	void (*fn)(void *) = a;
	void *arg = b;
	volatile bool *done_flag = c;

	fn(arg);
	*done_flag = true;
}

static struct k_thread thread_har;
static K_THREAD_STACK_DEFINE(tstack_har, HAR_STACKSZ);

static struct k_thread run_on_threads[CONFIG_MP_MAX_NUM_CPUS];
static K_THREAD_STACK_ARRAY_DEFINE(run_on_stacks, CONFIG_MP_MAX_NUM_CPUS, RUN_ON_STACKSZ);
static volatile bool run_on_flags[CONFIG_MP_MAX_NUM_CPUS];

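/* Per-CPU CCOUNT-to-platform-clock ratio, scaled by 1000; filled in
 * by core_smoke() and cross-checked between CPUs.
 */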
static uint32_t clk_ratios[CONFIG_MP_MAX_NUM_CPUS];

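/* Launches fn(arg) in a fresh thread pinned to the given CPU.  With
 * wait set, spins until the callback signals completion and then
 * reaps the thread; otherwise the caller must abort
 * run_on_threads[cpu] itself once it is done.
 */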
static void run_on_cpu(int cpu, void (*fn)(void *), void *arg, bool wait)
{
	__ASSERT_NO_MSG(cpu < arch_num_cpus());

	/* Highest priority isn't actually guaranteed to preempt
	 * whatever's running, but we assume the test hasn't laid
	 * traps for itself.
	 */
	k_thread_create(&run_on_threads[cpu], run_on_stacks[cpu], RUN_ON_STACKSZ,
			run_on_cpu_threadfn, fn, arg, (void *)&run_on_flags[cpu],
			K_HIGHEST_THREAD_PRIO, 0, K_FOREVER);
	k_thread_cpu_mask_clear(&run_on_threads[cpu]);
	k_thread_cpu_mask_enable(&run_on_threads[cpu], cpu);
	run_on_flags[cpu] = false;
	k_thread_start(&run_on_threads[cpu]);

	if (wait) {
		while (!run_on_flags[cpu]) {
			delay_relax();
			k_yield();
		}
		k_thread_abort(&run_on_threads[cpu]);
	}
}

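/* Reads the Xtensa CCOUNT special register: cycles elapsed on the
 * local core.
 */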
static inline uint32_t ccount(void)
{
	uint32_t ret;

	__asm__ volatile("rsr %0, CCOUNT" : "=r"(ret));
	return ret;
}

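/* Per-CPU sanity checks: validates the cached/uncached memory
 * aliases, measures the CCOUNT vs. platform clock ratio, and times a
 * tight loop to confirm the instruction cache is working.
 */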
static void core_smoke(void *arg)
{
	int cpu = (int) arg;
	volatile int tag;
	static int static_tag;

	zassert_equal(cpu, arch_curr_cpu()->id, "wrong cpu");

	/* Un/cached regions should be configured and distinct */
	zassert_equal(&tag, sys_cache_cached_ptr_get((void *)&tag),
		      "stack memory not cached");
	zassert_not_equal(&tag, sys_cache_uncached_ptr_get((void *)&tag),
			  "stack memory has no uncached alias");
	zassert_not_equal(&static_tag, sys_cache_cached_ptr_get((void *)&static_tag),
		      "static memory unexpectedly cached");
	zassert_equal(&static_tag, sys_cache_uncached_ptr_get((void *)&static_tag),
			  "static memory not uncached");

	/* Un/cached regions should be working */
	printk(" Cache behavior check\n");
	volatile int *ctag = (volatile int *)sys_cache_cached_ptr_get((void *)&tag);
	volatile int *utag = (volatile int *)sys_cache_uncached_ptr_get((void *)&tag);

	tag = 99;
	zassert_true(*ctag == 99, "variable is cached");
	*utag = 42;
	zassert_true(*ctag == 99, "uncached assignment unexpectedly affected cache");
	zassert_true(*utag == 42, "uncached memory affected unexpectedly");
	sys_cache_data_flush_range((void *)ctag, sizeof(*ctag));
	zassert_true(*utag == 99, "cache flush didn't work");

	/* Calibrate clocks */
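	/* The CCOUNT delta over the platform timer delta is stored
	 * scaled by 1000; the loop below only ensures both counters
	 * advance far enough for that ratio to be meaningful.
	 */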
	uint32_t cyc1, cyc0 = k_cycle_get_32();
	uint32_t cc1, cc0 = ccount();

	do {
		cyc1 = k_cycle_get_32();
		cc1 = ccount();
	} while ((cc1 - cc0) < 1000 || (cyc1 - cyc0) < 1000);

	clk_ratios[cpu] = ((cc1 - cc0) * 1000) / (cyc1 - cyc0);
	printk(" CCOUNT/WALCLK ratio %d.%3.3d\n",
	       clk_ratios[cpu] / 1000, clk_ratios[cpu] % 1000);

	for (int i = 0; i < cpu; i++) {
		int32_t diff = MAX(1, abs(clk_ratios[i] - clk_ratios[cpu]));

		zassert_true((clk_ratios[cpu] / diff) > 100,
			     "clocks off by more than 1%");
	}

	/* Check tight loop performance to validate instruction cache */
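	/* The loop below is two instructions per iteration (addi +
	 * bnez), so insns is twice the iteration count; with the
	 * icache enabled the cycles-per-instruction figure should
	 * stay well under 3.
	 */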
	uint32_t count0 = 1000, count, dt, insns;

	count = count0;
	cyc0 = ccount();
	__asm__ volatile("1: addi %0, %0, -1; bnez %0, 1b" : "+r"(count));
	cyc1 = ccount();
	dt = cyc1 - cyc0;
	insns = count0 * 2;
	zassert_true((dt / insns) < 3,
		     "instruction rate too slow, icache disabled?");
	printk(" CPI = %d.%3.3d\n", dt / insns, ((1000 * dt) / insns) % 1000);
}

ZTEST(intel_adsp_boot, test_4th_cpu_behavior)
{
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		printk("Per-CPU smoke test %d...\n", i);
		run_on_cpu(i, core_smoke, (void *)i, true);
	}
}

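/* Minimal run_on_cpu() payload: sets a flag to prove the target CPU
 * is alive and scheduling threads.
 */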
static void alive_fn(void *arg)
{
	*(bool *)arg = true;
}

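/* Halts one secondary core, verifies it no longer schedules work,
 * then restarts it and waits for the queued callback to run.
 */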
static void halt_and_restart(int cpu)
{
	printk("halt/restart core %d...\n", cpu);
	static bool alive_flag;
	uint32_t all_cpus = BIT(arch_num_cpus()) - 1;
	int ret;

	/* On older hardware we need to get the host to turn the core
	 * off. Construct an ADSPCS with only this core disabled
	 */
	if (!IS_ENABLED(CONFIG_SOC_INTEL_CAVS_V25)) {
		intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_ADSPCS,
				     (all_cpus & ~BIT(cpu)) << 16);
	}

	ret = soc_adsp_halt_cpu(cpu);
	zassert_ok(ret, "Couldn't halt CPU");

	alive_flag = false;
	run_on_cpu(cpu, alive_fn, &alive_flag, false);
	k_msleep(100);
	zassert_false(alive_flag, "cpu didn't halt");

	if (!IS_ENABLED(CONFIG_SOC_INTEL_CAVS_V25)) {
		/* Likewise need to ask the host to turn it back on,
		 * and give it some time to spin up before we hit it.
		 * We don't have a return message wired to be notified
		 * of completion.
		 */
		intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_ADSPCS,
				     all_cpus << 16);
		k_msleep(50);
	}

	k_smp_cpu_start(cpu, NULL, NULL);

	/* Startup can be slow */
	k_msleep(50);

	AWAIT(alive_flag == true);

	k_thread_abort(&run_on_threads[cpu]);
}

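/* Body of the CPU0-pinned test thread: exercises halt/restart on
 * every secondary CPU in turn.
 */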
void halt_and_restart_thread(void *p1, void *p2, void *p3)
{
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		halt_and_restart(i);
	}
}

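/* Halts and restarts each secondary CPU in sequence, verifying that
 * each one stops while halted and comes back afterwards.
 */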
ZTEST(intel_adsp_boot, test_2nd_cpu_halt)
{
	int ret;

	/* Obviously this only works on CPU0. So, we create a thread pinned
	 * to CPU0 to effectively run the test.
	 */
	k_thread_create(&thread_har, tstack_har, HAR_STACKSZ,
			halt_and_restart_thread, NULL, NULL, NULL,
			HAR_PRIORITY, 0, K_FOREVER);
	ret = k_thread_cpu_pin(&thread_har, 0);
	zassert_ok(ret, "Couldn't pin thread to CPU 0, test can't be run");
	k_thread_start(&thread_har);

	k_thread_join(&thread_har, K_FOREVER);
}