/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/sys_io.h>

#include <xtensa/config/core-isa.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);

#include <zephyr/zsr.h>
#include <cavs-idc.h>
#include <soc.h>
#include <zephyr/cache.h>
#include <adsp_shim.h>
#include <adsp_memory.h>
#include <cpu_init.h>

struct cpustart_rec {
	uint32_t        cpu;
	arch_cpustart_t	fn;
	void            *arg;
};

static struct cpustart_rec start_rec;
const uint32_t *z_mp_start_cpu = &start_rec.cpu;

char *z_mp_stack_top;

/* Vestigial silliness: An old mechanism for core startup would embed
 * a "manifest" of code to copy to LP-SRAM at startup (vs. the tiny
 * trampoline we use here).  This was constructed in the linker
 * script, and the first word would encode the number of entries.  As
 * it happens, SOF still emits the code to copy this data, so it needs
 * to see this symbol point to a zero.
 */
uint32_t _loader_storage_manifest_start;

/* Simple array of CPUs that are active and available for an IPI.  The
 * IDC interrupt is ALSO used to bring a CPU out of reset, so we need
 * to be absolutely sure we don't try to IPI a CPU that isn't ready to
 * start, or else we'll launch it into garbage and crash the DSP.
 */
bool soc_cpus_active[CONFIG_MP_MAX_NUM_CPUS];
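
/* To make the guard above concrete, here is a minimal sketch (not
 * part of this file) of how an IPI sender can consult
 * soc_cpus_active before signaling another core.  arch_proc_id()
 * and arch_num_cpus() are standard Zephyr APIs; the IDC poke is a
 * hypothetical stand-in for the real register interface:
 *
 *	void sketch_send_ipi(void)
 *	{
 *		unsigned int curr = arch_proc_id();
 *
 *		for (unsigned int c = 0; c < arch_num_cpus(); c++) {
 *			// Never poke a core that is still in reset
 *			if (c == curr || !soc_cpus_active[c]) {
 *				continue;
 *			}
 *			HYPOTHETICAL_IDC_SEND(curr, c);
 *		}
 *	}
 */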

#define NOP4 "nop; nop; nop; nop;"
#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
#define NOP128 NOP32 NOP32 NOP32 NOP32
/* Tiny assembly stub for calling z_mp_entry() on the auxiliary CPUs.
 * Mask interrupts, clear the register window state and set the stack
 * pointer.  This represents the minimum work required to run C code
 * safely.
 *
 * Note that alignment is absolutely required: the IDC protocol passes
 * only the upper 30 bits of the address to the second CPU.
 */
__asm__(".section .text.z_soc_mp_asm_entry, \"x\" \n\t"
	".align 4                   \n\t"
	".global z_soc_mp_asm_entry \n\t"
	"z_soc_mp_asm_entry:        \n\t"
	"  movi  a0, 0x4002f        \n\t" /* WOE | UM | INTLEVEL(max) */
	"  wsr   a0, PS             \n\t"
	"  movi  a0, 0              \n\t" /* Reset the register window state */
	"  wsr   a0, WINDOWBASE     \n\t"
	"  movi  a0, 1              \n\t"
	"  wsr   a0, WINDOWSTART    \n\t"
	"  rsync                    \n\t"
	"  movi  a1, z_mp_start_cpu \n\t"
	"  l32i  a1, a1, 0          \n\t" /* a1 = z_mp_start_cpu (= &start_rec.cpu) */
	"  l32i  a1, a1, 0          \n\t" /* a1 = start_rec.cpu */
	"  rsr   a2, PRID           \n\t" /* a2 = this core's hardware ID */
	"  sub   a2, a2, a1         \n\t"
	"  bnez  a2, soc_mp_idle    \n\t" /* Idle if we aren't the CPU being started */
	"  movi  a1, z_mp_stack_top \n\t"
	"  l32i  a1, a1, 0          \n\t" /* Stack pointer = z_mp_stack_top */
	"  call4 z_mp_entry         \n\t"
	"soc_mp_idle:               \n\t"
#ifdef CONFIG_XTENSA_WAITI_BUG
	NOP128
	"  isync                    \n\t"
	"  extw                     \n\t"
#endif
	"  waiti 0                  \n\t" /* Power-gating is allowed, we'll exit via reset */
	"  j soc_mp_idle            \n\t");

#undef NOP128
#undef NOP32
#undef NOP4
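
/* For illustration, a hedged sketch of the launching side implied by
 * the alignment note above: since the IDC register carries only the
 * upper 30 bits of the entry address, the host core shifts the
 * vector right by two before programming it.  The HYPOTHETICAL_IDC_*
 * helpers stand in for the real register interface:
 *
 *	extern void z_soc_mp_asm_entry(void);
 *
 *	void sketch_program_entry(int cpu)
 *	{
 *		// The low two bits are dropped in transit, which is
 *		// why z_soc_mp_asm_entry must be 4-byte aligned.
 *		uint32_t vec = ((uint32_t)&z_soc_mp_asm_entry) >> 2;
 *
 *		HYPOTHETICAL_IDC_SET_ENTRY(cpu, vec);
 *		HYPOTHETICAL_IDC_POWER_UP(cpu);
 *	}
 */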

static __imr void __used z_mp_entry(void)
{
	cpu_early_init();

	/* Set up the CPU pointer. */
	_cpu_t *cpu = &_kernel.cpus[start_rec.cpu];

	__asm__ volatile("wsr %0, " ZSR_CPU_STR :: "r"(cpu));

	soc_mp_startup(start_rec.cpu);
	soc_cpus_active[start_rec.cpu] = true;
	start_rec.fn(start_rec.arg);
	__ASSERT(false, "arch_cpu_start() handler should never return");
}

/* Called from the SOC power management code when a core returns from
 * a low-power state; re-enters the start function registered by
 * arch_cpu_start().
 */
void mp_resume_entry(void)
{
	start_rec.fn(start_rec.arg);
}

bool arch_cpu_active(int cpu_num)
{
	return soc_cpus_active[cpu_num];
}

void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	__ASSERT_NO_MSG(!soc_cpus_active[cpu_num]);

	start_rec.cpu = cpu_num;
	start_rec.fn = fn;
	start_rec.arg = arg;

	z_mp_stack_top = K_KERNEL_STACK_BUFFER(stack) + sz;

	soc_start_core(cpu_num);
}
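
/* A minimal usage sketch, for illustration only: in practice the
 * kernel's SMP bootstrap calls arch_cpu_start() itself.  The stack
 * object and entry function here are hypothetical:
 *
 *	K_KERNEL_STACK_DEFINE(sketch_stack, 4096);
 *
 *	static void sketch_entry(void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *		for (;;) {	// must never return
 *		}
 *	}
 *
 *	void sketch_start_cpu1(void)
 *	{
 *		arch_cpu_start(1, sketch_stack,
 *			       K_KERNEL_STACK_SIZEOF(sketch_stack),
 *			       sketch_entry, NULL);
 *	}
 */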

/* Fallback stub for external SOF code */
__imr int cavs_idc_smp_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	return 0;
}