/*
 * Copyright (c) 2018-2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <device.h>
#include <init.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <sys/sys_io.h>
#include <sys/__assert.h>
#include <xtensa/corebits.h>

#include <logging/log.h>
LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);

#include "soc.h"
#include "memory.h"

#ifdef CONFIG_SCHED_IPI_SUPPORTED
#include <drivers/ipm.h>
#include <ipm/ipm_cavs_idc.h>

static const struct device *idc;
#endif
extern void __start(void);

struct cpustart_rec {
	uint32_t	cpu;
	arch_cpustart_t	fn;
	char		*stack_top;
	void		*arg;
	uint32_t	vecbase;
	uint32_t	alive;
	/* padding to cache line */
	uint8_t		padding[XCHAL_DCACHE_LINESIZE - 6 * 4];
};
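
/*
 * Sanity check for the padding math above: assuming six 4-byte
 * members (i.e. 32-bit pointers), the record should occupy exactly
 * one data cache line.
 */
BUILD_ASSERT(sizeof(struct cpustart_rec) == XCHAL_DCACHE_LINESIZE,
	     "cpustart_rec must fill exactly one data cache line");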

static __aligned(XCHAL_DCACHE_LINESIZE)
struct cpustart_rec start_rec;

static void *mp_top;

static void mp_entry2(void)
{
	volatile int ps, ie;

	/* Copy over VECBASE from the main CPU for an initial value
	 * (will need to revisit this if we ever allow a user API to
	 * change interrupt vectors at runtime).  Make sure interrupts
	 * are locally disabled, then synthesize a PS value that will
	 * enable them for the user code to pass to irq_unlock()
	 * later.
	 */
	__asm__ volatile("rsr.PS %0" : "=r"(ps));
	ps &= ~(PS_EXCM_MASK | PS_INTLEVEL_MASK);
	__asm__ volatile("wsr.PS %0" : : "r"(ps));

	ie = 0;
	__asm__ volatile("wsr.INTENABLE %0" : : "r"(ie));
	__asm__ volatile("wsr.VECBASE %0" : : "r"(start_rec.vecbase));
	__asm__ volatile("rsync");

	/* Set up the CPU pointer. */
	_cpu_t *cpu = &_kernel.cpus[start_rec.cpu];

	__asm__ volatile(
		"wsr." CONFIG_XTENSA_KERNEL_CPU_PTR_SR " %0" : : "r"(cpu));

#ifdef CONFIG_IPM_CAVS_IDC
	/* The IDC interrupt must be enabled while running on the current core */
	irq_enable(XTENSA_IRQ_NUMBER(DT_IRQN(DT_INST(0, intel_cavs_idc))));
#endif /* CONFIG_IPM_CAVS_IDC */
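
	/* Publish the "alive" flag and flush it so the polling loop
	 * in arch_start_cpu() on the boot core can observe it.
	 */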
	start_rec.alive = 1;
	SOC_DCACHE_FLUSH(&start_rec, sizeof(start_rec));

	start_rec.fn(start_rec.arg);

#if CONFIG_MP_NUM_CPUS == 1
	/* CPU#1 can be under manual control running custom functions
	 * instead of participating in general thread execution.
	 * Put the CPU into idle after those functions return
	 * so this won't return.
	 */
	for (;;) {
		k_cpu_idle();
	}
#endif
}

/* Defines a locally callable "function" named mp_stack_switch().  The
 * first argument (in register a2 post-ENTRY) is the new stack pointer
 * to go into register a1.  The second (a3) is the entry point.
 * Because this never returns, a0 is used as a scratch register then
 * set to zero for the called function (a null return value is the
 * signal for "top of stack" to the debugger).
 */
void mp_stack_switch(void *stack, void *entry);
__asm__("\n"
	".align 4		\n"
	"mp_stack_switch:	\n\t"

	"entry a1, 16		\n\t"

	"movi a0, 0		\n\t"

	"jx a3			\n\t");

/* Carefully constructed to use no stack beyond compiler-generated ABI
 * instructions. Stack pointer is pointing to __stack at this point.
 */
void z_mp_entry(void)
{
	mp_stack_switch(mp_top, mp_entry2);
}

void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	volatile struct soc_dsp_shim_regs *dsp_shim_regs =
		(volatile struct soc_dsp_shim_regs *)SOC_DSP_SHIM_REG_BASE;
	volatile struct soc_global_regs *soc_glb_regs =
		(volatile struct soc_global_regs *)SOC_S1000_GLB_CTRL_BASE;
	uint32_t vecbase;

	__ASSERT(cpu_num == 1, "Intel S1000 supports only two CPUs!");

	/* Setup data to boot core #1 */
	__asm__ volatile("rsr.VECBASE %0\n\t" : "=r"(vecbase));

	start_rec.cpu = cpu_num;
	start_rec.fn = fn;
	start_rec.stack_top = Z_THREAD_STACK_BUFFER(stack) + sz;
	start_rec.arg = arg;
	start_rec.vecbase = vecbase;
	start_rec.alive = 0;

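	/* Stash the stack top where z_mp_entry() will pick it up when
	 * core #1 comes out of reset.
	 */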
	mp_top = Z_THREAD_STACK_BUFFER(stack) + sz;

	SOC_DCACHE_FLUSH(&start_rec, sizeof(start_rec));

#ifdef CONFIG_SCHED_IPI_SUPPORTED
	idc = device_get_binding(DT_LABEL(DT_INST(0, intel_cavs_idc)));
#endif

	/*
	 * The SoC Boot ROM has a hard-coded address for the boot vector
	 * in LP-SRAM, and will jump unconditionally to it, so power up
	 * the LP-SRAM and set the vector.
	 */
	sys_write32(0x0, SOC_L2RAM_LOCAL_MEM_REG_LSPGCTL);
	*((uint32_t *)LPSRAM_BOOT_VECTOR_ADDR) = (uint32_t)__start;

	/* Disable power gating for DSP core #cpu_num */
	dsp_shim_regs->pwrctl |= SOC_PWRCTL_DISABLE_PWR_GATING_DSP1;

	/*
	 * Since we do not know the status of the core,
	 * power it down and force it into reset and stall.
	 */
	soc_glb_regs->cavs_dsp1power_control |=
		SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CRST |
		SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CSTALL;

	soc_glb_regs->cavs_dsp1power_control &=
		~SOC_S1000_GLB_CTRL_DSP1_PWRCTL_SPA;

	/* Wait for core power down */
	while ((soc_glb_regs->cavs_dsp1power_control &
		SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CPA) != 0) {
	}

	/* Now power up the core */
	soc_glb_regs->cavs_dsp1power_control |=
		SOC_S1000_GLB_CTRL_DSP1_PWRCTL_SPA;
	/* Wait for core power up */
	while ((soc_glb_regs->cavs_dsp1power_control &
		SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CPA) == 0) {
	}

	/* Then step out of reset, and un-stall */
	soc_glb_regs->cavs_dsp1power_control &=
		~SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CRST;

	soc_glb_regs->cavs_dsp1power_control &=
		~SOC_S1000_GLB_CTRL_DSP1_PWRCTL_CSTALL;

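	/* Poll the startup record, invalidating the local cache on
	 * each pass, until mp_entry2() on core #1 sets the "alive"
	 * flag.
	 */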
	do {
		SOC_DCACHE_INVALIDATE(&start_rec, sizeof(start_rec));
	} while (start_rec.alive == 0);
}
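
/*
 * Illustrative caller, a sketch assuming the conventions of
 * kernel/smp.c (the stack and flag names below are assumptions,
 * not defined in this file):
 *
 *	arch_start_cpu(1, z_interrupt_stacks[1], CONFIG_ISR_STACK_SIZE,
 *		       smp_init_top, &start_flag);
 */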

#ifdef CONFIG_SCHED_IPI_SUPPORTED
FUNC_ALIAS(soc_sched_ipi, arch_sched_ipi, void);
void soc_sched_ipi(void)
{
	if (likely(idc != NULL)) {
		ipm_send(idc, 0, IPM_CAVS_IDC_MSG_SCHED_IPI_ID,
			 IPM_CAVS_IDC_MSG_SCHED_IPI_DATA, 0);
	}
}
#endif