/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

static atomic_t global_lock;
static atomic_t start_flag;
static atomic_t ready_flag;

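/* Recursive, system-wide lock backing irq_lock() semantics on SMP:
 * local interrupts are masked first, then the shared atomic is
 * spun on unless this thread already holds the lock (tracked in
 * the per-thread global_lock_count).
 */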
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}

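/* Release one level of the recursive global lock; the shared atomic
 * is cleared only when the outermost level is dropped.  The interrupt
 * state saved by z_smp_global_lock() is always restored.
 */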
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}

/* Tiny delay that relaxes bus traffic to avoid spamming a shared
 * memory bus while polling an atomic variable
 */
static inline void local_delay(void)
{
	for (volatile int i = 0; i < 1000; i++) {
	}
}

static void wait_for_start_signal(atomic_t *cpu_start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(cpu_start_flag)) {
		local_delay();
	}
}

/* Legacy interfaces for early-version SOF CPU bringup. To be removed */
#ifdef CONFIG_SOF
void z_smp_thread_init(void *arg, struct k_thread *thread)
{
	z_dummy_thread_init(thread);
	wait_for_start_signal(arg);
}

void z_smp_thread_swap(void)
{
	z_swap_unlocked();
}
#endif

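/* Entry point for a newly started CPU: report in via ready_flag so
 * start_cpu() can return, wait for the start signal, set up a dummy
 * thread and the per-CPU timer, then enter the scheduler through
 * z_swap_unlocked() and never return.
 */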
static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	struct k_thread dummy_thread;

	(void)atomic_set(&ready_flag, 1);

	wait_for_start_signal(arg);
	z_dummy_thread_init(&dummy_thread);
	smp_timer_init();

	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

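/* Boot one secondary CPU: initialize its per-CPU kernel data, launch
 * it on its interrupt stack with smp_init_top() as entry point, and
 * spin until the new CPU signals readiness via ready_flag.
 */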
static void start_cpu(int id, atomic_t *start_flag)
{
	z_init_cpu(id);
	(void)atomic_clear(&ready_flag);
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, start_flag);
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}

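/* Start a single CPU outside of z_smp_init(); the start flag is set
 * up front so the CPU begins scheduling as soon as it comes up.
 */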
void z_smp_start_cpu(int id)
{
	(void)atomic_set(&start_flag, 1); /* async, don't care */
	start_cpu(id, &start_flag);
}

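/* Bring up all secondary CPUs at boot.  The start flag is kept clear
 * while each CPU is started so they all park in
 * wait_for_start_signal(), then it is set once every CPU has checked
 * in, releasing them together.
 */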
void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		start_cpu(i, &start_flag);
	}
	(void)atomic_set(&start_flag, 1);
}

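/* True if the current context may migrate between CPUs: neither
 * running in an ISR nor holding an interrupt lock.
 */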
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}