/*
 * Copyright (c) 2018 Intel corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

/* Kernel-wide lock backing z_smp_global_lock()/z_smp_global_unlock() */
static atomic_t global_lock;

/* Set nonzero to release secondary CPUs spinning in z_smp_thread_init() */
static atomic_t start_flag;
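
/*
 * Acquire the global lock with local interrupts masked.  The lock
 * nests per thread via base.global_lock_count, so only the outermost
 * acquisition spins on the shared atomic.  Returns the interrupt key
 * to hand back to z_smp_global_unlock().
 */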
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}
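
/*
 * Undo one level of global lock nesting: release the shared atomic
 * only when this thread's count drops to zero, then restore the
 * interrupt state saved in key.
 */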
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/*
 * Called from within z_swap(), so assumes the lock is already held.
 * Releases the global lock on behalf of the incoming thread unless
 * that thread itself holds it via z_smp_global_lock().
 */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}

#if CONFIG_MP_NUM_CPUS > 1
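
/*
 * Secondary CPU early init: spin until the start flag set by
 * z_smp_init()/z_smp_start_cpu() releases us, then install the
 * provided dummy thread as this CPU's _current so the first swap
 * has a valid outgoing context.
 */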
void z_smp_thread_init(void *arg, struct k_thread *thread)
{
	atomic_t *cpu_start_flag = arg;

	/* Wait for the signal to begin scheduling */
	while (!atomic_get(cpu_start_flag)) {
	}

	z_dummy_thread_init(thread);
}
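
/* Swap into the scheduler from the current context with no lock held */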
void z_smp_thread_swap(void)
{
	z_swap_unlocked();
}
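
/*
 * Initial entry point for CPUs launched via arch_start_cpu(): wait
 * for the start signal on a stack-local dummy thread, bring up this
 * CPU's timer, then swap into the scheduler and never return.
 */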
static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	struct k_thread dummy_thread;

	z_smp_thread_init(arg, &dummy_thread);
	smp_timer_init();

	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
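
/*
 * Start one secondary CPU on demand, e.g. when CONFIG_SMP_BOOT_DELAY
 * keeps z_smp_init() from launching it at boot: clear the start flag,
 * kick the CPU into smp_init_top(), then set the flag to release it.
 */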
void z_smp_start_cpu(int id)
{
	(void)atomic_clear(&start_flag);
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, &start_flag);
	(void)atomic_set(&start_flag, 1);
}

#endif /* CONFIG_MP_NUM_CPUS > 1 */
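
/*
 * Called during boot on the primary CPU: launch the secondary CPUs
 * (unless CONFIG_SMP_BOOT_DELAY defers that to z_smp_start_cpu()),
 * then release them by setting the shared start flag.
 */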
void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, &start_flag);
	}
#endif

	(void)atomic_set(&start_flag, 1);
}
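
/*
 * True if the current context may migrate to another CPU: false when
 * running in an ISR or with interrupts locked, since such contexts
 * are pinned to this CPU for the duration.
 */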
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}