/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

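/* global_lock backs the SMP-aware global IRQ lock below; the two
 * flags are handshake variables used while bringing up secondary CPUs.
 */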
static atomic_t global_lock;
static atomic_t cpu_start_flag;
static atomic_t ready_flag;

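/* SMP-aware global IRQ lock: mask interrupts on the local CPU, then
 * spin to acquire a single system-wide atomic flag.  The per-thread
 * global_lock_count lets the same thread nest calls without
 * deadlocking against itself.
 */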
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}

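/* Inverse of z_smp_global_lock(): drop one level of nesting, release
 * the global lock when the count reaches zero, and restore the local
 * interrupt state saved in key.
 */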
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}

/* Tiny delay to relax bus traffic, so that CPUs polling an atomic
 * variable don't saturate a shared memory bus.
 */
static inline void local_delay(void)
{
	for (volatile int i = 0; i < 1000; i++) {
	}
}

static void wait_for_start_signal(atomic_t *start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
		local_delay();
	}
}

/* Legacy interfaces for early-version SOF CPU bringup. To be removed */
#ifdef CONFIG_SOF
void z_smp_thread_init(void *arg, struct k_thread *thread)
{
	z_dummy_thread_init(thread);
	wait_for_start_signal(arg);
}

void z_smp_thread_swap(void)
{
	z_swap_unlocked();
}
#endif

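/* First code executed by a secondary CPU: report that the CPU is up
 * (releasing start_cpu() on the booting CPU), wait for the signal to
 * begin scheduling, install a dummy thread so there is a context to
 * switch away from, start the per-CPU timer if a system clock exists,
 * then enter the scheduler.  Never returns.
 */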
static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	struct k_thread dummy_thread;

	(void)atomic_set(&ready_flag, 1);

	wait_for_start_signal(arg);
	z_dummy_thread_init(&dummy_thread);
#ifdef CONFIG_SYS_CLOCK_EXISTS
	smp_timer_init();
#endif

	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

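/* Bring one CPU online: initialize its kernel per-CPU data, clear the
 * shared ready_flag, launch the CPU on its interrupt stack, and spin
 * until smp_init_top() sets ready_flag.  Because ready_flag is shared,
 * CPUs must be started one at a time.
 */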
static void start_cpu(int id, atomic_t *start_flag)
{
	z_init_cpu(id);
	(void)atomic_clear(&ready_flag);
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, start_flag);
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}

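/* Start a single CPU after the system is already running.  The start
 * flag is set first so the new CPU proceeds straight to scheduling.
 */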
void z_smp_start_cpu(int id)
{
	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
	start_cpu(id, &cpu_start_flag);
}

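/* Boot-time SMP initialization on the primary CPU: bring up every
 * secondary CPU one at a time, then release them all to the scheduler
 * by setting cpu_start_flag.
 */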
void z_smp_init(void)
{
	(void)atomic_clear(&cpu_start_flag);

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		start_cpu(i, &cpu_start_flag);
	}
	(void)atomic_set(&cpu_start_flag, 1);
}

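/* Returns true if the current context could migrate to another CPU at
 * any moment, i.e. we are in thread context with interrupts enabled;
 * an ISR, or a thread holding an IRQ lock, stays pinned to this CPU.
 */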
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}