/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/cache.h>

#include <intel_adsp_ipc.h>
#include "tests.h"

#define RUN_ON_STACKSZ 2048
#define HAR_STACKSZ    1024
#define HAR_PRIORITY   7

/* Utility for spin-polled loops.  Avoids spamming shared resources
 * like SRAM or MMIO registers.
 */
static ALWAYS_INLINE void delay_relax(void)
{
	for (volatile int j = 0; j < 1000; j++) {
	}
}
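
/* Thread entry for run_on_cpu(): call fn(arg), then latch the
 * per-CPU done flag so the spawning core can see completion.
 */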
static void run_on_cpu_threadfn(void *a, void *b, void *c)
{
	void (*fn)(void *) = a;
	void *arg = b;
	volatile bool *done_flag = c;

	fn(arg);
	*done_flag = true;
}

static struct k_thread thread_har;
static K_THREAD_STACK_DEFINE(tstack_har, HAR_STACKSZ);

static struct k_thread run_on_threads[CONFIG_MP_MAX_NUM_CPUS];
static K_THREAD_STACK_ARRAY_DEFINE(run_on_stacks, CONFIG_MP_MAX_NUM_CPUS, RUN_ON_STACKSZ);
static volatile bool run_on_flags[CONFIG_MP_MAX_NUM_CPUS];

static uint32_t clk_ratios[CONFIG_MP_MAX_NUM_CPUS];
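
/* Run fn(arg) on a specific CPU in a one-shot high-priority thread.
 * With wait=true, spin until the payload signals completion and then
 * reap the thread; otherwise leave the thread for the caller to manage.
 */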
static void run_on_cpu(int cpu, void (*fn)(void *), void *arg, bool wait)
{
	__ASSERT_NO_MSG(cpu < arch_num_cpus());

	/* Highest priority isn't actually guaranteed to preempt
	 * whatever's running, but we assume the test hasn't laid
	 * traps for itself.
	 */
	k_thread_create(&run_on_threads[cpu], run_on_stacks[cpu], RUN_ON_STACKSZ,
			run_on_cpu_threadfn, fn, arg, (void *)&run_on_flags[cpu],
			K_HIGHEST_THREAD_PRIO, 0, K_FOREVER);
	k_thread_cpu_mask_clear(&run_on_threads[cpu]);
	k_thread_cpu_mask_enable(&run_on_threads[cpu], cpu);
	run_on_flags[cpu] = false;
	k_thread_start(&run_on_threads[cpu]);

	if (wait) {
		while (!run_on_flags[cpu]) {
			delay_relax();
			k_yield();
		}
		k_thread_abort(&run_on_threads[cpu]);
	}
}
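
/* Read the Xtensa CCOUNT special register: a free-running per-core
 * cycle counter, used below to compare core clock against wall clock.
 */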
static inline uint32_t ccount(void)
{
	uint32_t ret;

	__asm__ volatile("rsr %0, CCOUNT" : "=r"(ret));
	return ret;
}
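
/* Per-core smoke test: verifies cached/uncached SRAM aliasing,
 * cross-core clock agreement via CCOUNT, and instruction cache
 * performance with a timed tight loop.
 */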
static void core_smoke(void *arg)
{
	int cpu = (int) arg;
	volatile int tag;
	static int static_tag;

	zassert_equal(cpu, arch_curr_cpu()->id, "wrong cpu");

	/* Un/cached regions should be configured and distinct */
	zassert_equal(&tag, arch_xtensa_cached_ptr((void *)&tag),
		      "stack memory not cached");
	zassert_not_equal(&tag, arch_xtensa_uncached_ptr((void *)&tag),
			  "uncached alias of stack memory not distinct");
	zassert_not_equal(&static_tag, arch_xtensa_cached_ptr((void *)&static_tag),
			  "cached alias of static memory not distinct");
	zassert_equal(&static_tag, arch_xtensa_uncached_ptr((void *)&static_tag),
			  "static memory not uncached");

	/* Un/cached regions should be working */
	printk(" Cache behavior check\n");
	volatile int *ctag = (volatile int *)arch_xtensa_cached_ptr((void *)&tag);
	volatile int *utag = (volatile int *)arch_xtensa_uncached_ptr((void *)&tag);

	tag = 99;
	zassert_true(*ctag == 99, "cached pointer doesn't see assignment");
	*utag = 42;
	zassert_true(*ctag == 99, "uncached assignment unexpectedly affected cache");
	zassert_true(*utag == 42, "uncached write didn't land");
	sys_cache_data_flush_range((void *)ctag, sizeof(*ctag));
	zassert_true(*utag == 99, "cache flush didn't work");

	/* Calibrate clocks */
	uint32_t cyc1, cyc0 = k_cycle_get_32();
	uint32_t cc1, cc0 = ccount();

	do {
		cyc1 = k_cycle_get_32();
		cc1 = ccount();
	} while ((cc1 - cc0) < 1000 || (cyc1 - cyc0) < 1000);

	clk_ratios[cpu] = ((cc1 - cc0) * 1000) / (cyc1 - cyc0);
	printk(" CCOUNT/WALLCLK ratio %d.%3.3d\n",
	       clk_ratios[cpu] / 1000, clk_ratios[cpu] % 1000);

	for (int i = 0; i < cpu; i++) {
		int32_t diff = MAX(1, abs((int32_t)clk_ratios[i] - (int32_t)clk_ratios[cpu]));

		zassert_true((clk_ratios[cpu] / diff) > 100,
			     "clocks off by more than 1%%");
	}

	/* Check tight loop performance to validate instruction cache */
	uint32_t count0 = 1000, count, dt, insns;

	count = count0;
	cyc0 = ccount();
	__asm__ volatile("1: addi %0, %0, -1; bnez %0, 1b" : "+r"(count));
	cyc1 = ccount();
	dt = cyc1 - cyc0;
	insns = count0 * 2;
	zassert_true((dt / insns) < 3,
		     "instruction rate too slow, icache disabled?");
	printk(" CPI = %d.%3.3d\n", dt / insns, ((1000 * dt) / insns) % 1000);
}
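
/* Exercise core_smoke() on every available core in turn. */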
ZTEST(intel_adsp_boot, test_4th_cpu_behavior)
{
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		printk("Per-CPU smoke test %d...\n", i);
		run_on_cpu(i, core_smoke, (void *)i, true);
	}
}
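
/* Minimal run_on_cpu() payload: sets a flag to prove the core ran. */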
static void alive_fn(void *arg)
{
	*(bool *)arg = true;
}
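
/* Halt a secondary core and bring it back, verifying that it stops
 * running while halted and responds again after restart.  On pre-2.5
 * cAVS hardware the core power state is controlled by the host, so we
 * ask it via IPCCMD_ADSPCS with the desired core-enable mask.
 */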
static void halt_and_restart(int cpu)
{
	printk("halt/restart core %d...\n", cpu);
	static bool alive_flag;
	uint32_t all_cpus = BIT(arch_num_cpus()) - 1;
	int ret;

	/* On older hardware we need to get the host to turn the core
	 * off.  Construct an ADSPCS value with only this core disabled.
	 */
	if (!IS_ENABLED(CONFIG_SOC_INTEL_CAVS_V25)) {
		intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_ADSPCS,
				     (all_cpus & ~BIT(cpu)) << 16);
	}

	ret = soc_adsp_halt_cpu(cpu);
	zassert_ok(ret, "Couldn't halt CPU");

	alive_flag = false;
	run_on_cpu(cpu, alive_fn, &alive_flag, false);
	k_msleep(100);
	zassert_false(alive_flag, "cpu didn't halt");

	if (!IS_ENABLED(CONFIG_SOC_INTEL_CAVS_V25)) {
		/* Likewise we need to ask the host to turn it back on,
		 * and give it some time to spin up before we hit it.
		 * We don't have a return message wired up to notify us
		 * of completion.
		 */
		intel_adsp_ipc_send_message(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_ADSPCS,
				     all_cpus << 16);
		k_msleep(50);
	}

	z_smp_start_cpu(cpu);

	/* Startup can be slow */
	k_msleep(50);

	AWAIT(alive_flag == true);

	k_thread_abort(&run_on_threads[cpu]);
}
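
/* Worker for the halt/restart test; must run on CPU 0 so it never
 * halts the core it is executing on (see test_2nd_cpu_halt below).
 */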
void halt_and_restart_thread(void *p1, void *p2, void *p3)
{
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		halt_and_restart(i);
	}
}

ZTEST(intel_adsp_boot, test_2nd_cpu_halt)
{
	int ret;

	/* Halting can only be driven from CPU0, so create a thread
	 * pinned to CPU0 to run the test sequence there.
	 */
	k_thread_create(&thread_har, tstack_har, HAR_STACKSZ,
			halt_and_restart_thread, NULL, NULL, NULL,
			HAR_PRIORITY, 0, K_FOREVER);
	ret = k_thread_cpu_pin(&thread_har, 0);
	zassert_ok(ret, "Couldn't pin thread to CPU 0, test can't be run");
	k_thread_start(&thread_har);

	k_thread_join(&thread_har, K_FOREVER);
}