/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/irq.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/drivers/pm_cpu_ops.h>

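/*
 * Boot parameters handed from arch_start_cpu() to each secondary CPU, plus
 * the variables used for the wake-up handshake: the primary CPU publishes
 * the target hart ID in riscv_cpu_wake_flag and the initial stack pointer in
 * riscv_cpu_sp, and the secondary's early boot code (outside this file)
 * acknowledges by setting riscv_cpu_boot_flag.
 */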
volatile struct {
	arch_cpustart_t fn;
	void *arg;
} riscv_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

volatile uintptr_t __noinit riscv_cpu_wake_flag;
volatile uintptr_t riscv_cpu_boot_flag;
volatile void *riscv_cpu_sp;

extern void __start(void);

#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
void soc_interrupt_init(void);
#endif

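/*
 * Start the secondary CPU identified by cpu_num: record its entry function
 * and argument, publish its initial stack pointer, then wake the matching
 * hart and wait for it to report in.
 */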
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	riscv_cpu_init[cpu_num].fn = fn;
	riscv_cpu_init[cpu_num].arg = arg;

	riscv_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
	riscv_cpu_boot_flag = 0U;

#ifdef CONFIG_PM_CPU_OPS
	if (pm_cpu_on(cpu_num, (uintptr_t)&__start)) {
		printk("Failed to boot secondary CPU %d\n", cpu_num);
		return;
	}
#endif

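	/*
	 * Keep publishing the target hart ID until the secondary CPU
	 * signals that it has started by setting riscv_cpu_boot_flag.
	 */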
	while (riscv_cpu_boot_flag == 0U) {
		riscv_cpu_wake_flag = _kernel.cpus[cpu_num].arch.hartid;
	}
}

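/*
 * Entry point reached by each secondary CPU once it has been woken up:
 * finish per-CPU initialization and then jump to the entry function
 * registered by arch_start_cpu().
 */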
void arch_secondary_cpu_init(int hartid)
{
	unsigned int i;
	unsigned int cpu_num = 0;

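	/* Map this hart ID back to its logical CPU number */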
	for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (_kernel.cpus[i].arch.hartid == hartid) {
			cpu_num = i;
		}
	}
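	/* Stash a pointer to this CPU's per-CPU data in mscratch */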
	csr_write(mscratch, &_kernel.cpus[cpu_num]);
#ifdef CONFIG_SMP
	_kernel.cpus[cpu_num].arch.online = true;
#endif
#if defined(CONFIG_MULTITHREADING) && defined(CONFIG_THREAD_LOCAL_STORAGE)
	__asm__("mv tp, %0" : : "r" (z_idle_threads[cpu_num].tls));
#endif
#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
	soc_interrupt_init();
#endif
#ifdef CONFIG_RISCV_PMP
	z_riscv_pmp_init();
#endif
#ifdef CONFIG_SMP
	irq_enable(RISCV_IRQ_MSOFT);
#endif
	riscv_cpu_init[cpu_num].fn(riscv_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

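/*
 * Machine-mode software interrupt pending (MSIP) registers, one 32-bit word
 * per hart starting at MSIP_BASE; writing 1 raises an IPI on that hart and
 * writing 0 clears it.
 */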
#define MSIP_BASE 0x2000000UL
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]

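/* Pending IPI types for each CPU, encoded as bits in cpu_pending_ipi[] */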
static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1

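/* Raise a scheduling IPI on every other online CPU */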
void arch_sched_ipi(void)
{
	unsigned int key = arch_irq_lock();
	unsigned int id = _current_cpu->id;
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if (i != id && _kernel.cpus[i].arch.online) {
			atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
			MSIP(_kernel.cpus[i].arch.hartid) = 1;
		}
	}

	arch_irq_unlock(key);
}

#ifdef CONFIG_FPU_SHARING
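/* Ask the given CPU to flush its FPU content (see IPI_FPU_FLUSH handling below) */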
void arch_flush_fpu_ipi(unsigned int cpu)
{
	atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
	MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif

static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

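	/* Acknowledge the software interrupt for this hart */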
	MSIP(csr_read(mhartid)) = 0;

	atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);

	if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
		z_sched_ipi();
	}
#ifdef CONFIG_FPU_SHARING
	if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
		/* disable IRQs */
		csr_clear(mstatus, MSTATUS_IEN);
		/* perform the flush */
		arch_flush_local_fpu();
		/*
		 * No need to re-enable IRQs here as long as
		 * this remains the last case.
		 */
	}
#endif
}

#ifdef CONFIG_FPU_SHARING
/*
 * Make sure there is no pending FPU flush request for this CPU while
 * waiting for a contended spinlock to become available. This prevents
 * a deadlock when the lock we need is already taken by another CPU
 * that also wants its FPU content to be reinstated while such content
 * is still live in this CPU's FPU.
 */
void arch_spin_relax(void)
{
	atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];

	if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
		/*
		 * We may not be in IRQ context here hence cannot use
		 * arch_flush_local_fpu() directly.
		 */
		arch_float_disable(_current_cpu->arch.fpu_owner);
	}
}
#endif

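/* Hook up and unmask the machine software interrupt used for IPIs */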
int arch_smp_init(void)
{
	IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
	irq_enable(RISCV_IRQ_MSOFT);

	return 0;
}
SYS_INIT(arch_smp_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_SMP */