/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include "sdkconfig.h"
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "esp_cpu.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT        0
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE,.count = 0}
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
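// XOR-ing either core's raw ID register value (CORE_ID_REGVAL_PRO or
// CORE_ID_REGVAL_APP) with CORE_ID_REGVAL_XOR_SWAP yields the other core's
// value; spinlock_acquire() uses this to derive 'other_core_id' without a branch.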

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
} spinlock_t;
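
/* Example (illustrative, not part of the original header): a lock shared
 * between both cores can be statically initialized with SPINLOCK_INITIALIZER,
 * in which case no runtime call to spinlock_initialize() is needed:
 *
 *     static spinlock_t s_my_lock = SPINLOCK_INITIALIZER;
 */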

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
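
/* Example (an illustrative sketch; 'my_obj_t' and its fields are hypothetical
 * names, not part of this header): initializing a lock that is embedded in a
 * dynamically allocated object:
 *
 *     typedef struct {
 *         spinlock_t lock;
 *         uint32_t shared_count;
 *     } my_obj_t;
 *
 *     void my_obj_init(my_obj_t *obj)
 *     {
 *         spinlock_initialize(&obj->lock);
 *         obj->shared_count = 0;
 *     }
 */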

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired
 *
 * This function will:
 * - Save the current interrupt state, then disable interrupts
 * - Spin until the lock is acquired or until the timeout is reached
 * - Restore the interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 *       function re-enables interrupts once the spinlock is acquired). For critical
 *       sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait; passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the spinlock was acquired, false if it timed out
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id, other_core_id;
    bool lock_set;
    esp_cpu_cycle_count_t start_count;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    // Note: The core IDs are the full 32-bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
    core_id = xt_utils_get_raw_core_id();
    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;

    /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
     * CORE_ID_REGVAL_APP:
     *  - If SPINLOCK_FREE, we want to atomically set it to 'core_id'.
     *  - If "our" core_id, we can drop through immediately.
     *  - If "other_core_id", we spin here.
     */

    // The caller is already the owner of the lock. Simply increment the nesting count
    if (lock->owner == core_id) {
        assert(lock->count > 0 && lock->count < 0xFF);    // Bad count value implies memory corruption
        lock->count++;
        XTOS_RESTORE_INTLEVEL(irq_status);
        return true;
    }

    /* First attempt to take the lock.
     *
     * Note: We make a first attempt separately (instead of folding it into the loop below) in order to avoid a
     * call to esp_cpu_get_cycle_count(). Attempting once up front makes acquiring a free lock quicker, which
     * is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time, since
     * they aren't meant to be held for long).
     */
    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
    if (lock_set || timeout == SPINLOCK_NO_WAIT) {
        // We've successfully taken the lock, or we are not retrying
        goto exit;
    }

    // The first attempt to take the lock has failed. Retry until the lock is taken, or until we time out.
    start_count = esp_cpu_get_cycle_count();
    do {
        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
        if (lock_set) {
            break;
        }
        // Keep looping if we are waiting forever, or check if we have timed out
    } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= timeout);

exit:
    if (lock_set) {
        assert(lock->owner == core_id);
        assert(lock->count == 0);   // This is the first time the lock is set, so count should still be 0
        lock->count++;  // Finally, we increment the lock count
    } else {    // We timed out waiting for the lock
        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
        assert(lock->count < 0xFF); // Bad count value implies memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
    return lock_set;

#else  // !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    return true;
#endif
}
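
/* Example (an illustrative sketch; 's_lock' and 's_counter' are hypothetical
 * names): the timeout is measured in CPU cycles. SPINLOCK_WAIT_FOREVER spins
 * until the lock is obtained, while SPINLOCK_NO_WAIT makes a single attempt
 * and returns false immediately on contention:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_counter = 0;
 *
 *     void counter_increment(void)
 *     {
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // cannot fail
 *         s_counter++;
 *         spinlock_release(&s_lock);
 *     }
 *
 *     bool counter_try_increment(void)
 *     {
 *         if (!spinlock_acquire(&s_lock, SPINLOCK_NO_WAIT)) {
 *             return false; // lock is currently held by the other core
 *         }
 *         s_counter++;
 *         spinlock_release(&s_lock);
 *         return true;
 *     }
 */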

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save the current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore the interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 *       function re-enables interrupts once the spinlock is released). For critical
 *       sections, use the interface provided by the operating system.
 * @param lock - target spinlock object (must be held by the calling core)
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    core_id = xt_utils_get_raw_core_id();
    assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
    lock->count--;

    if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
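
/* Example (an illustrative sketch with hypothetical helper names): the lock
 * is recursive on the owning core, so nested acquire/release pairs are
 * balanced through the 'count' field:
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     static void inner(void)
 *     {
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count: 1 -> 2
 *         // ... access shared state ...
 *         spinlock_release(&s_lock);                        // count: 2 -> 1
 *     }
 *
 *     void outer(void)
 *     {
 *         spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count: 0 -> 1, lock taken
 *         inner();                                          // nested use on the same core
 *         spinlock_release(&s_lock);                        // count: 1 -> 0, lock freed
 *     }
 */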

#ifdef __cplusplus
}
#endif