/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdatomic.h>
#include <strings.h>
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "sdkconfig.h"
#include "spi_common_internal.h"
#include "esp_intr_alloc.h"
#include "soc/soc_caps.h"
#include "esp_log.h"
#include "esp_heap_caps.h"

/*
 * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
 * the background operations (ISR or cache access).
 *
 * There are N (device/task) + 1 (BG) candidate acquiring processors that may touch the bus.
 *
 * The core of the lock is a `status` atomic variable, which is always available. No intermediate
 * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
 * atomically read the status, and write back the status value ORed / ANDed with given masks.
 *
 * Definitions of the status bits (a worked example follows this list):
 * - [30]    WEAK_BG_FLAG, active when the BG is the cache
 * - [29:20] LOCK bits, active when the corresponding device is asking to acquire the bus
 * - [19:10] PENDING bits, active when the BG has acknowledged the REQ bits, but hasn't fully handled them
 * - [ 9: 0] REQ bits, active when the corresponding device is requesting BG operations
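 *
 * Worked example (illustrative only): for the device in slot 2, the REQ bit is BIT(2), the
 * PENDING bit is BIT(12), and the LOCK bit is BIT(22). A status value of 0x00400004 therefore
 * means "device 2 holds the lock and has a fresh BG request", and `status & DEV_MASK(2)`
 * (== status & 0x00401004) reads everything the lock tracks about device 2 in one atomic load.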
 *
 * The REQ bits together with the PENDING bits are called the BG bits, which represent the actual
 * BG request state of the devices. Either one of REQ or PENDING being active indicates the device
 * has pending BG requests. The reason for having two bits instead of one is given in the appendix
 * below.
 *
 * The acquiring processor is the processor (task or ISR) currently allowed to touch the critical
 * resources, i.e. the SPI bus.
 *
 * States of the lock:
 * - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
 *   operation is in progress.
 *
 * - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
 *   acquiring the bus.
 *
 * - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
 *
 * - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
 *
 *
 * Whenever a bit is written to the status, it means a device (on a task) is trying to acquire
 * the lock (either for the task itself, or for the ISR). When there are no LOCK bits or BG bits
 * active, the caller immediately becomes the acquiring processor. Otherwise, the task has to
 * block, and the ISR will not be invoked until scheduled by the current acquiring processor.
 *
 * The acquiring processor is responsible for assigning the next acquiring processor by calling
 * the scheduler, usually after it finishes some requests and has cleared the corresponding status
 * bit. There is one exception: when the last bit is cleared from the status, so that no other
 * LOCK or BG bits remain active, the acquiring processor loses its role immediately, and doesn't
 * need to call the scheduler to assign the next acquiring processor.
 *
 * The acquiring processor may also choose to assign a new acquiring device when there is none, by
 * calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this case,
 * is still the ISR, until it calls the scheduler.
 *
 *
 * Transitions of the FSM:
 *
 * - STATE_IDLE: no acquiring device, no acquiring processor, no LOCK or BG bits active
 *   -> STATE_BG:  by `req_core`
 *   -> STATE_ACQ: by `acquire_core`
 *
 * - STATE_BG:
 *   * No acquiring device, the ISR is the acquiring processor, there are BG bits active but no
 *     LOCK bits.
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_IDLE:   by `bg_exit_core` after `clear_pend_core` for all BG bits
 *   -> STATE_BG_ACQ: by `schedule_core`, when there is a new LOCK bit set (by `acquire_core`)
 *
 * - STATE_BG_ACQ:
 *   * There is an acquiring device, the ISR is the acquiring processor, and there may be BG bits
 *     active for the acquiring device.
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits of the acquiring
 *      device.
 *
 *      The lock should not go to STATE_ACQ (unblocking the acquiring task) until all requests of
 *      the acquiring device are finished. This preserves the ordering of foreground (polling) and
 *      background operations of the device: the background operations queued before the acquire
 *      should be completed first.
 *
 * - STATE_ACQ:
 *   * There is an acquiring device, the task is the acquiring processor, and there are no BG bits
 *     active for the acquiring device.
 *   * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`)
 *     should be resumed while turning into this state.
 *
 *   -> STATE_BG_ACQ: by `req_core`
 *   -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is a LOCK bit for another
 *      device, and the new acquiring device has active BG bits.
 *   -> STATE_ACQ (other device): by `acquire_end_core`, when there is a LOCK bit for another
 *      device, but the new acquiring device has no active BG bits.
 *   -> STATE_BG:   by `acquire_end_core`, when there is no LOCK bit active, but there are active
 *      BG bits.
 *   -> STATE_IDLE: by `acquire_end_core`, when there is neither a LOCK bit nor a BG bit active.
 *
 * The `req_core` used in the task is a little special: it asks for the acquiring role on behalf
 * of the ISR. When it succeeds for the first time, it invokes the ISR (hence passing the
 * acquiring role to the BG). Otherwise it does not block; the ISR will be invoked automatically
 * by another acquiring processor. The caller of `req_core` never becomes the acquiring processor
 * through this function.
 *
 *
 * Appendix: The design of having both a request bit and a pending bit solves a concurrency issue
 * between the tasks and the BG: a task can queue several requests, but a single request bit
 * cannot represent the number of requests queued.
 *
 * Here's how a task and the ISR work concurrently:
 * - Task: (a) Write to queue -> (b) Write request bit
 *   The task has to write the request bit (b) after the data is prepared in the queue (a),
 *   otherwise the BG may fail to read from the queue when it sees the request bit set.
 *
 * - BG:   (c) Read queue -> (d) Clear request bit
 *   Since the BG cannot know the number of requests queued, it has to repeatedly check the
 *   queue (c) until it finds the queue empty, and then clear the request bit (d).
 *
 * The events may happen in the order: (c) -> (a) -> (b) -> (d). This may cause a false clear of
 * the request bit, leaving data prepared in the queue while the request bit is inactive.
 *
 * Step (e), moving the REQ bits to the PEND bits before (c), is introduced to solve this problem.
 * In this case (d) is changed to clear the PEND bit instead. Even if the order is
 * (e) -> (c) -> (a) -> (b) -> (d), only the PEND bit is cleared, while the REQ bit is still
 * active.
 */
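
/*
 * Hedged lifecycle sketch of a single device, using only the public functions defined below
 * (error handling elided; the FSM states refer to the design notes above):
 *
 *     spi_bus_lock_acquire_start(dev, portMAX_DELAY);  // STATE_IDLE -> STATE_ACQ
 *     spi_bus_lock_bg_request(dev);                    // STATE_ACQ  -> STATE_BG_ACQ, ISR invoked
 *     spi_bus_lock_wait_bg_done(dev, portMAX_DELAY);   // blocks until STATE_ACQ again
 *     spi_bus_lock_acquire_end(dev);                   // STATE_ACQ  -> STATE_IDLE (if no other bits active)
 */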

struct spi_bus_lock_dev_t;
typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;

typedef struct spi_bus_lock_t spi_bus_lock_t;


#define MAX_DEV_NUM     10

// Bit 29-20: lock bits, Bit 19-10: pending bits
// Bit  9- 0: request bits, Bit 30: weak background flag
#define LOCK_SHIFT      20
#define PENDING_SHIFT   10
#define REQ_SHIFT       0

#define WEAK_BG_FLAG    BIT(30)    /**< The bus is permanently requested by background operations.
                                    * This flag is weak: it will not prevent devices from acquiring the bus,
                                    * but it helps the BG to be re-enabled after the bus is released.
                                    */

// Get the bit mask where bits [high-1, low] are all 1'b1.
#define BIT1_MASK(high, low)    ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))

#define LOCK_BIT(mask)      ((mask) << LOCK_SHIFT)
#define REQUEST_BIT(mask)   ((mask) << REQ_SHIFT)
#define PENDING_BIT(mask)   ((mask) << PENDING_SHIFT)
#define DEV_MASK(id)        (LOCK_BIT(1 << (id)) | PENDING_BIT(1 << (id)) | REQUEST_BIT(1 << (id)))
#define ID_DEV_MASK(mask)   (__builtin_ffs(mask) - 1)

#define REQ_MASK            BIT1_MASK(REQ_SHIFT + MAX_DEV_NUM, REQ_SHIFT)
#define PEND_MASK           BIT1_MASK(PENDING_SHIFT + MAX_DEV_NUM, PENDING_SHIFT)
#define BG_MASK             BIT1_MASK(REQ_SHIFT + MAX_DEV_NUM * 2, REQ_SHIFT)
#define LOCK_MASK           BIT1_MASK(LOCK_SHIFT + MAX_DEV_NUM, LOCK_SHIFT)

#define DEV_REQ_MASK(dev)   ((dev)->mask & REQ_MASK)
#define DEV_PEND_MASK(dev)  ((dev)->mask & PEND_MASK)
#define DEV_BG_MASK(dev)    ((dev)->mask & BG_MASK)
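
// A hedged sanity sketch of the mask layout above: these compile-time checks generate no code and
// simply spell out a few concrete values implied by the macros (device id 2 is chosen arbitrarily).
_Static_assert(REQ_MASK  == 0x000003FF, "REQ bits occupy [9:0]");
_Static_assert(PEND_MASK == 0x000FFC00, "PENDING bits occupy [19:10]");
_Static_assert(LOCK_MASK == 0x3FF00000, "LOCK bits occupy [29:20]");
_Static_assert(DEV_MASK(2) == (BIT(22) | BIT(12) | BIT(2)), "per-device mask combines one LOCK, PENDING and REQ bit");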

struct spi_bus_lock_t {
    /**
     * The core of the lock. These bits are the status of the lock, which should always be
     * available. No intermediate status is allowed. This is realized by atomic operations,
     * mainly `atomic_fetch_and` and `atomic_fetch_or`, which atomically read the status and
     * write back the status value ORed / ANDed with given masks.
     *
     * The request bits together with the pending bits represent the actual BG request state of
     * one device. Either one of them being active indicates the device has pending BG requests.
     *
     * Whenever a bit is written to the status, it means a device (on a task) is trying to
     * acquire the lock. This will succeed only when no LOCK or BG bits are active.
     *
     * The acquiring processor is responsible for calling the scheduler to pass its role to other
     * tasks or the BG, unless it clears the last bit in the status register.
     */
    //// Critical resources, only writable by the acquiring processor, and stable only when read by the acquiring processor.
    atomic_uint_fast32_t status;
    spi_bus_lock_dev_t* volatile acquiring_dev;     ///< The acquiring device
    bool volatile acq_dev_bg_active;    ///< BG is the acquiring processor serving the acquiring device, used by `wait_bg` to skip waiting quickly.
    bool volatile in_isr;               ///< ISR is touching HW
    //// End of critical resources

    atomic_intptr_t     dev[DEV_NUM_MAX];   ///< Child locks.
    bg_ctrl_func_t      bg_enable;          ///< Function to enable background operations.
    bg_ctrl_func_t      bg_disable;         ///< Function to disable background operations.
    void*               bg_arg;             ///< Argument for `bg_enable` and `bg_disable` functions.

    spi_bus_lock_dev_t* last_dev;           ///< Last used device, to decide whether to refresh all registers.
    int                 periph_cs_num;      ///< Number of the CS pins the HW has.

    //debug information
    int                 host_id;            ///< Host ID, for debug information printing
    uint32_t            new_req;            ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
};

struct spi_bus_lock_dev_t {
    SemaphoreHandle_t   semphr;     ///< Binary semaphore to notify the device it claimed the bus
    spi_bus_lock_t*     parent;     ///< Pointer to parent spi_bus_lock_t
    uint32_t            mask;       ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
};

DRAM_ATTR static const char TAG[] = "bus_lock";

#define LOCK_CHECK(a, str, ret_val, ...) \
    if (!(a)) { \
        ESP_LOGE(TAG, "%s(%d): " str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
        return (ret_val); \
    }

static inline int mask_get_id(uint32_t mask);
static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);

/*******************************************************************************
 * Atomic operations on the status
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
{
    return atomic_fetch_or(&lock->status, set);
}

IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
{
    return atomic_fetch_and(&lock->status, ~clear);
}

IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
{
    return atomic_load(&lock->status);
}

SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
{
    atomic_store(&lock->status, 0);
}

// Return the remaining status bits. A return value of 0 means the caller has just cleared the
// last active bit and immediately loses the acquiring processor role (see the notes above).
IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
{
    // The fetch and clear should be atomic, to avoid missing the all-zero status when all bits
    // are cleared.
    uint32_t state = lock_status_fetch_clear(lock, clear);
    return state & (~clear);
}

/*******************************************************************************
 * Scheduling service
 *
 * Modifications to the status bits may cause the acquiring processor to rotate. They also affect
 * the `acquiring_dev` (the acquiring device), `in_isr` (HW used in BG), and `acq_dev_bg_active`
 * (wait_bg_end can be skipped) members of the lock structure.
 *
 * Most of these operations should be atomic, and special attention should be paid to their
 * sequence.
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
{
    xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
}

IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
{
    xSemaphoreGive(dev_lock->semphr);
}

SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
    lock->bg_disable(lock->bg_arg);
}

IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
    lock->bg_enable(lock->bg_arg);
}

// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass the role to it.
// The caller will never become the acquiring processor after this function returns.
SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t *lock = dev_handle->parent;

    // Though `acquiring_dev` is a critical resource, `dev_handle == lock->acquiring_dev`
    // is a stable statement unless `acquire_start` or `acquire_end` is called by the current
    // device.
    if (dev_handle == lock->acquiring_dev) {
        // Set the REQ bit and check the BG bits if we are the acquiring processor.
        // If the BG bits were not active before, invoke the BG again.

        // To avoid racing against `clear_pend_core`, `acq_dev_bg_active` should be set before
        // setting the REQ bit.
        lock->acq_dev_bg_active = true;
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if ((status & DEV_BG_MASK(dev_handle)) == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    } else {
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if (status == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    }
}

// Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);

    // Check all bits except WEAK_BG
    if ((status & (BG_MASK | LOCK_MASK)) == 0) {
        // Succeed at once
        lock->acquiring_dev = dev_handle;
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        if (status & WEAK_BG_FLAG) {
            // Mainly to disable the cache (weak BG), which is not able to disable itself
            bg_disable(lock);
        }
        return true;
    }
    return false;
}

/**
 * Find the next acquiring processor according to the status. Directly changes the acquiring
 * device if a new one is found.
 *
 * Cases:
 * - BG should still be the acquiring processor (return false):
 *     1. The acquiring device has active BG bits: out_desired_dev = the acquiring device
 *     2. No acquiring device, but the BG is active: out_desired_dev = one device picked among
 *        those with active BG bits
 * - BG should yield to the task (return true):
 *     3. The acquiring device has no active BG bits: out_desired_dev = the acquiring device
 *     4. No acquiring device and no active BG bits: out_desired_dev = NULL
 *
 * The acquiring device's task needs to be resumed only in case 3.
 *
 * This scheduling can happen in either a task or the ISR, so `in_isr` and `bg_active` are not
 * touched.
 *
 * @param lock
 * @param status            Current status
 * @param out_desired_dev   Desired device to work next, see above.
 *
 * @return false if the BG should still be the acquiring processor, otherwise true (yield to task).
 */
IRAM_ATTR static inline bool
schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
{
    spi_bus_lock_dev_t* desired_dev = NULL;
    uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
    uint32_t bg_bits = status & BG_MASK;
    bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;

    bool bg_yield;
    if (lock_bits) {
        int dev_id = mask_get_id(lock_bits);
        desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

        lock->acquiring_dev = desired_dev;
        bg_yield = ((bg_bits & desired_dev->mask) == 0);
        lock->acq_dev_bg_active = !bg_yield;
    } else {
        lock->acq_dev_bg_active = false;
        if (bg_bits) {
            int dev_id = mask_get_id(bg_bits);
            desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
            BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

            lock->acquiring_dev = NULL;
            bg_yield = false;
        } else {
            desired_dev = NULL;
            lock->acquiring_dev = NULL;
            bg_yield = true;
        }
    }
    *out_desired_dev = desired_dev;
    return bg_yield;
}

// Clear the LOCK bit and trigger a rescheduling.
IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
    spi_bus_lock_dev_t* desired_dev = NULL;

    bool invoke_bg = !schedule_core(lock, status, &desired_dev);
    if (invoke_bg) {
        bg_enable(lock);
    } else if (desired_dev) {
        resume_dev(desired_dev);
    } else if (status & WEAK_BG_FLAG) {
        bg_enable(lock);
    }
}

// Move the REQ bits to the corresponding PEND bits. Must be called by the acquiring processor.
// Has no side effects on the acquiring device/processor.
SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
{
    uint32_t active_req_bits = status & REQ_MASK;
#if PENDING_SHIFT > REQ_SHIFT
    uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
#else
    uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
#endif
    // We have to set the PEND bits before clearing the REQ bits. Since the BG bits are read with
    // bitwise OR logic, this does not affect the effectiveness of the BG bits of any device.
    lock_status_fetch_set(lock, pending_mask);
    lock_status_fetch_clear(lock, active_req_bits);
}
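
// A concrete (illustrative) example of the move above: with REQ bits 0b101 active (devices 0
// and 2), pending_mask == 0b101 << PENDING_SHIFT == 0x1400, so after the two atomic operations
// the same two devices are marked in the PEND field instead of the REQ field, and their BG bits
// stay continuously active throughout.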

// Clear the PEND bit (not the REQ bit!) of a device; return a suggestion of whether we can try to
// quit the ISR. The acquiring processor role is lost immediately when the BG bits of the acquiring
// device become inactive, as indicated by the return value.
// Can be called only when the ISR is acting as the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
{
    bool finished;
    spi_bus_lock_t *lock = dev_handle->parent;
    uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);

    uint32_t status = lock_status_clear(lock, pend_mask);

    if (lock->acquiring_dev == dev_handle) {
        finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
        if (finished) {
            lock->acq_dev_bg_active = false;
        }
    } else {
        finished = (status == 0);
    }
    return finished;
}

// Return true if the ISR has already touched the HW, which means previous operations should be
// terminated first, before we use the HW again. Otherwise return false.
// In either case `in_isr` is marked as true, until `bg_exit_core` is called successfully with
// `wip=false`.
SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
    /*
     * The interrupt is disabled at the entry of the ISR to avoid the race below:
     *
     * `esp_intr_enable` will be called (b) after a new BG request is queued (a) in the task,
     * while `esp_intr_disable` should be called (c) if we check and find the sending queue
     * empty (d). If (c) happens after (d), things may happen in the sequence
     * (d) -> (a) -> (b) -> (c), and the interrupt ends up disabled while there is a pending BG
     * request in the queue.
     *
     * To avoid this, the interrupt is disabled here, and re-enabled later if required:
     * (c) -> (d) -> (a) -> (b) -> revert (c) if !d.
     */
    bg_disable(lock);
    if (lock->in_isr) {
        return false;
    } else {
        lock->in_isr = true;
        return true;
    }
}

// Handle the conditions of status and interrupt, avoiding the ISR being disabled while there are
// newly arriving BG requests.
// When called with `wip=true`, the ISR is still performing some operation: the interrupt is
// re-enabled and the function returns true unconditionally.
// When called with `wip=false`, it returns `true` only when there are no incoming BG requests; if
// the return value is `false`, the ISR should try again.
// Does not change the acquiring device.
SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
{
    // See the comments in `bg_entry_core`: re-enable the interrupt disabled in the entry, if we
    // do need the interrupt.
    if (wip) {
        bg_enable(lock);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
        return true;
    }

    bool ret;
    uint32_t status = lock_status_fetch(lock);
    if (lock->acquiring_dev) {
        if (status & DEV_BG_MASK(lock->acquiring_dev)) {
            BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
            ret = false;
        } else {
            // The request may arrive at any time, even after we fetched the status, so the value
            // of `acq_dev_bg_active` is unpredictable here.
            resume_dev_in_isr(lock->acquiring_dev, do_yield);
            ret = true;
        }
    } else {
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        ret = !(status & BG_MASK);
    }
    if (ret) {
        // When we exit successfully with no transaction done, mark the BG as inactive
        lock->in_isr = false;
    }
    return ret;
}

IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
{
    xSemaphoreTake(dev_handle->semphr, 0);
}

SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    BaseType_t ret = xSemaphoreTake(dev_handle->semphr, wait);

    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }
    return ESP_OK;
}

/*******************************************************************************
 * Initialization & Deinitialization
 ******************************************************************************/
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
{
    spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(1, sizeof(spi_bus_lock_t));
    if (lock == NULL) {
        return ESP_ERR_NO_MEM;
    }

    lock_status_init(lock);
    lock->acquiring_dev = NULL;
    lock->last_dev = NULL;
    lock->periph_cs_num = config->cs_num;
    lock->host_id = config->host_id;

    *out_lock = lock;
    return ESP_OK;
}
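
/*
 * Hedged usage sketch for the init/deinit pair (error handling elided; the field values are
 * illustrative, not prescriptive):
 *
 *     spi_bus_lock_handle_t bus_lock;
 *     spi_bus_lock_config_t cfg = {
 *         .host_id = 1,                          // SPI2 host, for example
 *         .cs_num = SOC_SPI_PERIPH_CS_NUM(1),    // number of HW CS pins of that host
 *     };
 *     spi_bus_init_lock(&bus_lock, &cfg);
 *     ...
 *     spi_bus_deinit_lock(bus_lock);             // all devices must be unregistered first
 */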

void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
{
    for (int i = 0; i < DEV_NUM_MAX; i++) {
        assert(atomic_load(&lock->dev[i]) == (intptr_t)NULL);
    }
    free(lock);
}

// Find a free device slot. Devices requiring a HW CS pin take the lowest free slot among the
// first `periph_cs_num` slots; others take the highest free slot, keeping the low slots available
// for CS-required devices. Returns the slot id, or -1 when no suitable slot is free.
static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
{
    if (cs_required) {
        int i;
        for (i = 0; i < lock->periph_cs_num; i++) {
            intptr_t null = (intptr_t) NULL;
            // Use 1 to occupy the slot; the actual setup comes later
            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
                break;
            }
        }
        return ((i == lock->periph_cs_num) ? -1 : i);
    } else {
        int i;
        for (i = DEV_NUM_MAX - 1; i >= 0; i--) {
            intptr_t null = (intptr_t) NULL;
            // Use 1 to occupy the slot; the actual setup comes later
            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
                break;
            }
        }
        return i;
    }
}

esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
                                    spi_bus_lock_dev_handle_t *out_dev_handle)
{
    if (lock == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
    if (id == -1) {
        return ESP_ERR_NOT_SUPPORTED;
    }

    spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(1, sizeof(spi_bus_lock_dev_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (dev_lock == NULL) {
        // Release the slot occupied in `try_acquire_free_dev`
        atomic_store(&lock->dev[id], (intptr_t)NULL);
        return ESP_ERR_NO_MEM;
    }
    dev_lock->semphr = xSemaphoreCreateBinary();
    if (dev_lock->semphr == NULL) {
        free(dev_lock);
        atomic_store(&lock->dev[id], (intptr_t)NULL);
        return ESP_ERR_NO_MEM;
    }
    dev_lock->parent = lock;
    dev_lock->mask = DEV_MASK(id);

    ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
    atomic_store(&lock->dev[id], (intptr_t)dev_lock);
    *out_dev_handle = dev_lock;
    return ESP_OK;
}
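
/*
 * Hedged registration sketch (continues the init sketch above; error handling elided):
 *
 *     spi_bus_lock_dev_handle_t dev;
 *     spi_bus_lock_dev_config_t dev_cfg = {
 *         .flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED,    // the device needs a HW CS pin
 *     };
 *     spi_bus_lock_register_dev(bus_lock, &dev_cfg, &dev);
 *     ...
 *     spi_bus_lock_unregister_dev(dev);
 */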

void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
{
    int id = dev_lock_get_id(dev_handle);

    spi_bus_lock_t* lock = dev_handle->parent;
    BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&lock->dev[id]) == (intptr_t)dev_handle);

    if (lock->last_dev == dev_handle) {
        lock->last_dev = NULL;
    }

    atomic_store(&lock->dev[id], (intptr_t)NULL);
    if (dev_handle->semphr) {
        vSemaphoreDelete(dev_handle->semphr);
    }

    free(dev_handle);
}

IRAM_ATTR static inline int mask_get_id(uint32_t mask)
{
    return ID_DEV_MASK(mask);
}

IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
{
    return mask_get_id(dev_lock->mask);
}

void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
{
    lock->bg_enable = bg_enable;
    lock->bg_disable = bg_disable;
    lock->bg_arg = arg;
}

IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
{
    return (dev_handle ? dev_lock_get_id(dev_handle) : -1);
}

// Will be called when the cache is disabled
IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
{
    spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
    dev_handle->parent->last_dev = dev_handle;
    if (last_dev != dev_handle) {
        int last_dev_id = (last_dev ? dev_lock_get_id(last_dev) : -1);
        ESP_DRAM_LOGV(TAG, "SPI dev changed from %d to %d",
                      last_dev_id, dev_lock_get_id(dev_handle));
    }
    return (dev_handle != last_dev);
}

/*******************************************************************************
 * Acquiring service
 ******************************************************************************/
IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);

    spi_bus_lock_t* lock = dev_handle->parent;

    // Clear the semaphore before checking
    dev_wait_prepare(dev_handle);
    if (!acquire_core(dev_handle)) {
        // Block until we become the acquiring processor (helped by the previous acquiring processor)
        esp_err_t err = dev_wait(dev_handle, wait);
        //TODO: add timeout handling here.
        if (err != ESP_OK) {
            return err;
        }
    }

    ESP_DRAM_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);

    // By the time we arrive here, the requests of this device should already have been handled
    uint32_t status = lock_status_fetch(lock);
    (void) status;
    BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);

    return ESP_OK;
}

IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
{
    // Release the bus
    spi_bus_lock_t* lock = dev_handle->parent;
    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot release a lock that hasn't been acquired.", ESP_ERR_INVALID_STATE);

    acquire_end_core(dev_handle);

    ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
    return ESP_OK;
}

SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
{
    return lock->acquiring_dev;
}

/*******************************************************************************
 * BG (background operation) service
 ******************************************************************************/
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
{
    return bg_entry_core(lock);
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
{
    return bg_exit_core(lock, wip, do_yield);
}

SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
{
    req_core(dev_handle);
    return ESP_OK;
}

IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
{
    spi_bus_lock_t *lock = dev_handle->parent;
    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot wait for a device that is not acquired", ESP_ERR_INVALID_STATE);
    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);

    // If no BG bits are active, skip quickly. This is ensured by the fact that
    // `spi_bus_lock_wait_bg_done` cannot run concurrently with `bg_request` on the same device.
    if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
        // Clear the semaphore before checking
        dev_wait_prepare(dev_handle);
        if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
            // Block until we become the acquiring processor (helped by the previous acquiring processor)
            esp_err_t err = dev_wait(dev_handle, wait);
            //TODO: add timeout handling here.
            if (err != ESP_OK) {
                return err;
            }
        }
    }

    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
    BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
    return ESP_OK;
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
{
    bool finished = clear_pend_core(dev_handle);
    ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
    return finished;
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
                                                       spi_bus_lock_dev_handle_t *out_dev_lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
    uint32_t status = lock_status_fetch(lock);
    return schedule_core(lock, status, out_dev_lock);
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
{
    spi_bus_lock_t* lock = dev_lock->parent;
    uint32_t status = lock_status_fetch(lock);
    uint32_t dev_status = status & dev_lock->mask;

    // Move the REQ bits of all devices to the corresponding PEND bits.
    // To reduce execution time, this is only done when the REQ bit of the calling device is set.
    if (dev_status & REQ_MASK) {
        update_pend_core(lock, status);
        return true;
    } else {
        return dev_status & PEND_MASK;
    }
}

SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
{
    uint32_t status = lock_status_fetch(lock);
    return status & BG_MASK;
}

/*******************************************************************************
 * Static variables of the locks of the main flash
 ******************************************************************************/
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
static spi_bus_lock_dev_t lock_main_flash_dev;

static spi_bus_lock_t main_spi_bus_lock = {
    /*
     * The main bus cache is permanently required, so this flag is set here and never cleared,
     * so that the cache will always be re-enabled when acquiring devices yield.
     */
    .status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
    .acquiring_dev = NULL,
    .dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
    .new_req = 0,
    .periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
};
const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;

esp_err_t spi_bus_lock_init_main_bus(void)
{
    spi_bus_main_set_lock(g_main_spi_bus_lock);
    return ESP_OK;
}

static StaticSemaphore_t main_flash_semphr;

static spi_bus_lock_dev_t lock_main_flash_dev = {
    .semphr = NULL,
    .parent = &main_spi_bus_lock,
    .mask = DEV_MASK(0),
};
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;

esp_err_t spi_bus_lock_init_main_dev(void)
{
    g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
    if (g_spi_lock_main_flash_dev->semphr == NULL) {
        return ESP_ERR_NO_MEM;
    }
    return ESP_OK;
}
#else //CONFIG_SPI_FLASH_SHARE_SPI1_BUS

// When the dev lock is not initialized, point to NULL
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = NULL;

#endif