/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/cpu.h"
#include "hal/cpu_hal.h"
#include "soc/compare_set.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT        0
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE, .count = 0}
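/* XOR-ing the PRID register value of one core (CORE_ID_REGVAL_PRO or
 * CORE_ID_REGVAL_APP) with this constant yields the value of the other core;
 * spinlock_acquire() uses it below to derive other_core_id. */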
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
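
/* Example usage (illustrative sketch only; 's_lock' and 'my_module_init' are
 * hypothetical names). A spinlock can be initialized either statically or at
 * run time:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;   // static initialization
 *
 *     void my_module_init(void)
 *     {
 *         spinlock_initialize(&s_lock);                   // equivalent run-time initialization
 *     }
 */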

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 *       function re-enables interrupts once the spinlock is acquired). For critical
 *       sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if the timeout expired first
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t result;
    uint32_t irq_status;
    uint32_t ccount_start;
    uint32_t core_id, other_core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    if (timeout != SPINLOCK_WAIT_FOREVER) {
        RSR(CCOUNT, ccount_start);
    }

    /* Determine which core we are running on, then spin until we own the lock */
    RSR(PRID, core_id);

    /* Note: core_id is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */

    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
    do {

        /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
         * CORE_ID_REGVAL_APP:
         *  - If SPINLOCK_FREE, we want to atomically set it to 'core_id'.
         *  - If "our" core_id, we can drop through immediately.
         *  - If "other_core_id", we spin here.
         */
        result = core_id;

#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        if (esp_ptr_external_ram(lock)) {
            compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
        } else {
#endif
        compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        }
#endif
        if (result != other_core_id) {
            break;
        }

        if (timeout != SPINLOCK_WAIT_FOREVER) {
            uint32_t ccount_now;
            ccount_now = cpu_hal_get_cycle_count();
            if (ccount_now - ccount_start > (unsigned)timeout) {
                XTOS_RESTORE_INTLEVEL(irq_status);
                return false;
            }
        }
    } while (1);

    /* Any other value implies memory corruption or an uninitialized mux */
    assert(result == core_id || result == SPINLOCK_FREE);
    assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're the first to lock iff count is zero */
    assert(lock->count < 0xFF); /* Bad count value implies memory corruption */

    lock->count++;
    XTOS_RESTORE_INTLEVEL(irq_status);
    return true;

#else  // CONFIG_FREERTOS_UNICORE || BOOTLOADER_BUILD
    return true;
#endif
}

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 *       function re-enables interrupts once the spinlock is released). For critical
 *       sections, use the interface provided by the operating system.
 * @param lock - target spinlock object, must have been previously acquired
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    RSR(PRID, core_id);
    assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
    lock->count--;

    if (!lock->count) {
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
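
/* Example usage (illustrative sketch only; 's_lock', 's_shared_counter',
 * 'increment_shared_counter' and 'try_increment_shared_counter' are hypothetical
 * names). As noted above, OS-provided critical sections should be preferred when
 * a true critical section is required:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_shared_counter;
 *
 *     void increment_shared_counter(void)
 *     {
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER);  // spin until owned
 *         s_shared_counter++;
 *         spinlock_release(&s_lock);
 *     }
 *
 *     bool try_increment_shared_counter(int32_t timeout_cycles)
 *     {
 *         if (!spinlock_acquire(&s_lock, timeout_cycles)) {  // give up after timeout_cycles CPU cycles
 *             return false;
 *         }
 *         s_shared_counter++;
 *         spinlock_release(&s_lock);
 *         return true;
 *     }
 */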

#ifdef __cplusplus
}
#endif