1 /*
2 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "freertos/FreeRTOS.h"
8 #include "freertos/semphr.h"
9 #include <stdatomic.h>
10 #include "sdkconfig.h"
11 #include "esp_private/spi_common_internal.h"
12 #include "esp_intr_alloc.h"
13 #include "soc/soc_caps.h"
14 #include "stdatomic.h"
15 #include "esp_log.h"
16 #include "esp_check.h"
17 #include <strings.h>
18 #include "esp_heap_caps.h"
19
20
21 /*
22 * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
23 * the background operations (ISR or cache access).
24 *
 * There are N (device/task) + 1 (BG) acquiring processor candidates that may touch the bus.
26 *
27 * The core of the lock is a `status` atomic variable, which is always available. No intermediate
28 * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
29 * atomically read the status, and bitwisely write status value ORed / ANDed with given masks.
30 *
31 * Definitions of the status:
32 * - [30] WEAK_BG_FLAG, active when the BG is the cache
33 * - [29:20] LOCK bits, active when corresponding device is asking for acquiring
34 * - [19:10] PENDING bits, active when the BG acknowledges the REQ bits, but hasn't fully handled them.
35 * - [ 9: 0] REQ bits, active when corresponding device is requesting for BG operations.
36 *
37 * The REQ bits together PENDING bits are called BG bits, which represent the actual BG request
38 * state of devices. Either one of REQ or PENDING being active indicates the device has pending BG
39 * requests. Reason of having two bits instead of one is in the appendix below.
40 *
 * Acquiring processor means the current processor (task or ISR) allowed to touch the critical
42 * resources, or the SPI bus.
43 *
44 * States of the lock:
45 * - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
46 * operation is in progress.
47 *
48 * - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
49 * acquiring the bus.
50 *
51 * - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
52 *
53 * - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
54 *
55 *
 * Whenever a bit is written to the status, it means a device on a task is trying to acquire
57 * the lock (either for the task, or the ISR). When there is no LOCK bits or BG bits active, the
58 * caller immediately become the acquiring processor. Otherwise, the task has to block, and the ISR
59 * will not be invoked until scheduled by the current acquiring processor.
60 *
61 * The acquiring processor is responsible to assign the next acquiring processor by calling the
62 * scheduler, usually after it finishes some requests, and cleared the corresponding status bit.
63 * But there is one exception, when the last bit is cleared from the status, after which there is
64 * no other LOCK bits or BG bits active, the acquiring processor lost its role immediately, and
65 * don't need to call the scheduler to assign the next acquiring processor.
66 *
67 * The acquiring processor may also choose to assign a new acquiring device when there is no, by
68 * calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this case,
69 * is still the ISR, until it calls the scheduler.
70 *
71 *
72 * Transition of the FSM:
73 *
74 * - STATE_IDLE: no acquiring device, nor acquiring processor, no LOCK or BG bits active
75 * -> STATE_BG: by `req_core`
76 * -> STATE_ACQ: by `acquire_core`
77 *
78 * - STATE_BG:
79 * * No acquiring device, the ISR is the acquiring processor, there is BG bits active, but no LOCK
80 * bits
81 * * The BG operation should be enabled while turning into this state.
82 *
83 * -> STATE_IDLE: by `bg_exit_core` after `clear_pend_core` for all BG bits
84 * -> STATE_BG_ACQ: by `schedule_core`, when there is new LOCK bit set (by `acquire_core`)
85 *
86 * - STATE_BG_ACQ:
87 * * There is acquiring device, the ISR is the acquiring processor, there may be BG bits active for
88 * the acquiring device.
89 * * The BG operation should be enabled while turning into this state.
90 *
91 * -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits for the acquiring
92 * device.
93 *
94 * Should not go to the STATE_ACQ (unblock the acquiring task) until all requests of the
95 * acquiring device are finished. This is to preserve the sequence of foreground (polling) and
96 * background operations of the device. The background operations queued before the acquiring
97 * should be completed first.
98 *
99 * - STATE_ACQ:
100 * * There is acquiring device, the task is the acquiring processor, there is no BG bits active for
101 * the acquiring device.
102 * * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`)
103 * should be resumed while turning into this state.
104 *
105 * -> STATE_BG_ACQ: by `req_core`
106 * -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another
107 * device, and the new acquiring device has active BG bits.
108 * -> STATE_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another devices,
109 * but the new acquiring device has no active BG bits.
110 * -> STATE_BG: by `acquire_end_core` when there is no LOCK bit active, but there are active BG
111 * bits.
112 * -> STATE_IDLE: by `acquire_end_core` when there is no LOCK bit, nor BG bit active.
113 *
114 * The `req_core` used in the task is a little special. It asks for acquiring processor for the
115 * ISR. When it succeed for the first time, it will invoke the ISR (hence passing the acquiring
116 * role to the BG). Otherwise it will not block, the ISR will be automatically be invoked by other
117 * acquiring processor. The caller of `req_core` will never become acquiring processor by this
118 * function.
119 *
120 *
121 * Appendix: The design, that having both request bit and pending bit, is to solve the
122 * concurrency issue between tasks and the bg, when the task can queue several requests,
123 * however the request bit cannot represent the number of requests queued.
124 *
125 * Here's the workflow of task and ISR work concurrently:
126 * - Task: (a) Write to Queue -> (b) Write request bit
127 * The Task have to write request bit (b) after the data is prepared in the queue (a),
128 * otherwise the BG may fail to read from the queue when it sees the request bit set.
129 *
130 * - BG: (c) Read queue -> (d) Clear request bit
131 * Since the BG cannot know the number of requests queued, it have to repeatedly check the
132 * queue (c), until it find the data is empty, and then clear the request bit (d).
133 *
134 * The events are possible to happen in the order: (c) -> (a) -> (b) -> (d). This may cause a false
135 * clear of the request bit. And there will be data prepared in the queue, but the request bit is
136 * inactive.
137 *
138 * (e) move REQ bits to PEND bits, happen before (c) is introduced to solve this problem. In this
139 * case (d) is changed to clear the PEND bit. Even if (e) -> (c) -> (a) -> (b) -> (d), only PEND
140 * bit is cleared, while the REQ bit is still active.
141 */
142
143 struct spi_bus_lock_dev_t;
144 typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;
145
146 typedef struct spi_bus_lock_t spi_bus_lock_t;
147
148
149 #define MAX_DEV_NUM 10
150
151 // Bit 29-20: lock bits, Bit 19-10: pending bits
152 // Bit 9-0: request bits, Bit 30:
153 #define LOCK_SHIFT 20
154 #define PENDING_SHIFT 10
155 #define REQ_SHIFT 0
156
157 #define WEAK_BG_FLAG BIT(30) /**< The bus is permanently requested by background operations.
158 * This flag is weak, will not prevent acquiring of devices. But will help the BG to be re-enabled again after the bus is release.
159 */
160
// get the bit mask where bits [high-1, low] are all 1'b1 s.
162 #define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
163
164 #define LOCK_BIT(mask) ((mask) << LOCK_SHIFT)
165 #define REQUEST_BIT(mask) ((mask) << REQ_SHIFT)
166 #define PENDING_BIT(mask) ((mask) << PENDING_SHIFT)
167 #define DEV_MASK(id) (LOCK_BIT(1<<id) | PENDING_BIT(1<<id) | REQUEST_BIT(1<<id))
168 #define ID_DEV_MASK(mask) (__builtin_ffs(mask) - 1)
169
170 #define REQ_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM, REQ_SHIFT)
171 #define PEND_MASK BIT1_MASK(PENDING_SHIFT+MAX_DEV_NUM, PENDING_SHIFT)
172 #define BG_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM*2, REQ_SHIFT)
173 #define LOCK_MASK BIT1_MASK(LOCK_SHIFT+MAX_DEV_NUM, LOCK_SHIFT)
174
175 #define DEV_REQ_MASK(dev) ((dev)->mask & REQ_MASK)
176 #define DEV_PEND_MASK(dev) ((dev)->mask & PEND_MASK)
177 #define DEV_BG_MASK(dev) ((dev)->mask & BG_MASK)
178
struct spi_bus_lock_t {
    /**
     * The core of the lock. These bits are status of the lock, which should be always available.
     * No intermediate status is allowed. This is realized by atomic operations, mainly
     * `atomic_fetch_and`, `atomic_fetch_or`, which atomically read the status, and bitwise write
     * status value ORed / ANDed with given masks.
     *
     * The request bits together pending bits represent the actual bg request state of one device.
     * Either one of them being active indicates the device has pending bg requests.
     *
     * Whenever a bit is written to the status, it means a device on a task is trying to
     * acquire the lock. But this will succeed only when no LOCK or BG bits active.
     *
     * The acquiring processor is responsible to call the scheduler to pass its role to other tasks
     * or the BG, unless it clears the last bit in the status register.
     */
    //// Critical resources, they are only writable by acquiring processor, and stable only when read by the acquiring processor.
    atomic_uint_fast32_t status;
    spi_bus_lock_dev_t* volatile acquiring_dev;     ///< The acquiring device (NULL when no device is acquiring)
    bool volatile acq_dev_bg_active;    ///< BG is the acquiring processor serving the acquiring device, used for the wait_bg to skip waiting quickly.
    bool volatile in_isr;               ///< ISR is touching HW
    //// End of critical resources

    atomic_intptr_t dev[DEV_NUM_MAX];   ///< Child locks, one per device slot: NULL when free, 1 while a slot is being claimed, else a spi_bus_lock_dev_t*.
    bg_ctrl_func_t bg_enable;           ///< Function to enable background operations.
    bg_ctrl_func_t bg_disable;          ///< Function to disable background operations
    void* bg_arg;                       ///< Argument for `bg_enable` and `bg_disable` functions.

    spi_bus_lock_dev_t* last_dev;       ///< Last used device, to decide whether to refresh all registers.
    int periph_cs_num;                  ///< Number of the CS pins the HW has.

    //debug information
    int host_id;                        ///< Host ID, for debug information printing
    uint32_t new_req;                   ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
};
214
// Per-device child lock, created by `spi_bus_lock_register_dev`.
struct spi_bus_lock_dev_t {
    SemaphoreHandle_t semphr;   ///< Binary semaphore to notify the device it claimed the bus
    spi_bus_lock_t* parent;     ///< Pointer to parent spi_bus_lock_t
    uint32_t mask;              ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
};
220
221 /**
222 * @note 1
223 * This critical section is only used to fix such condition:
224 *
225 * define: lock_bits = (lock->status & LOCK_MASK) >> LOCK_SHIFT; This `lock_bits` is the Bit 29-20 of the lock->status
226 *
227 * 1. spi_hdl_1:
228 * acquire_end_core():
229 * uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
230 *
 * Because this is the first `spi_hdl_1`, after this, lock_bits == 0`b0 and status == 0
232 *
233 * 2. spi_hdl_2:
234 * acquire_core:
235 * uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
236 *
237 * Then here status is 0`b0, but lock_bits == 0`b10. Because this is the `spi_hdl_2`
238 *
239 * 3. spi_hdl_2:
240 * `acquire_core` return true, because status == 0. `spi_bus_lock_acquire_start(spi_hdl_2)` then won't block.
241 *
242 * 4. spi_hdl_2:
243 * spi_device_polling_end(spi_hdl_2).
244 *
245 * 5. spi_hdl_1:
246 * acquire_end_core:
 * status is 0, so it clears the lock->acquiring_dev
248 *
249 * 6. spi_hdl_2:
250 * spi_device_polling_end:
251 * assert(handle == get_acquiring_dev(host)); Fail
252 *
253 * @note 2
254 * Only use this critical section in this condition. The critical section scope is limited to the smallest.
255 * As `spi_bus_lock` influences the all the SPIs (including MSPI) a lot!
256 */
257 portMUX_TYPE s_spinlock = portMUX_INITIALIZER_UNLOCKED;
258
259 DRAM_ATTR static const char TAG[] = "bus_lock";
260
261 static inline int mask_get_id(uint32_t mask);
262 static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
263
264 /*******************************************************************************
265 * atomic operations to the status
266 ******************************************************************************/
lock_status_fetch_set(spi_bus_lock_t * lock,uint32_t set)267 SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
268 {
269 return atomic_fetch_or(&lock->status, set);
270 }
271
lock_status_fetch_clear(spi_bus_lock_t * lock,uint32_t clear)272 IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
273 {
274 return atomic_fetch_and(&lock->status, ~clear);
275 }
276
// Atomically read the current status word.
IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
{
    return atomic_load(&lock->status);
}
281
// Reset the status word: no LOCK / PENDING / REQ bits, no WEAK_BG flag.
SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
{
    atomic_store(&lock->status, 0);
}
286
287 // return the remaining status bits
lock_status_clear(spi_bus_lock_t * lock,uint32_t clear)288 IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
289 {
290 //the fetch and clear should be atomic, avoid missing the all '0' status when all bits are clear.
291 uint32_t state = lock_status_fetch_clear(lock, clear);
292 return state & (~clear);
293 }
294
295 /*******************************************************************************
296 * Schedule service
297 *
298 * The modification to the status bits may cause rotating of the acquiring processor. It also have
299 * effects to `acquired_dev` (the acquiring device), `in_isr` (HW used in BG), and
300 * `acq_dev_bg_active` (wait_bg_end can be skipped) members of the lock structure.
301 *
302 * Most of them should be atomic, and special attention should be paid to the operation
303 * sequence.
304 ******************************************************************************/
// Wake the task blocked in `dev_wait` for this device, from ISR context.
// `do_yield` is set by FreeRTOS if a context switch should follow the ISR.
SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
{
    xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
}
309
// Wake the task blocked in `dev_wait` for this device, from task context.
IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
{
    xSemaphoreGive(dev_lock->semphr);
}
314
// Disable background operations via the callback installed by
// `spi_bus_lock_set_bg_control`. The debug check asserts a callback exists.
SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
    lock->bg_disable(lock->bg_arg);
}
320
// Enable background operations via the callback installed by
// `spi_bus_lock_set_bg_control`. The debug check asserts a callback exists.
IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
    lock->bg_enable(lock->bg_arg);
}
326
// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass that to it.
// The caller will never become the acquiring processor after this function returns.
SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t *lock = dev_handle->parent;

    // Though `acquired_dev` is critical resource, `dev_handle == lock->acquired_dev`
    // is a stable statement unless `acquire_start` or `acquire_end` is called by current
    // device.
    if (dev_handle == lock->acquiring_dev){
        // Set the REQ bit and check BG bits if we are the acquiring processor.
        // If the BG bits were not active before, invoke the BG again.

        // Avoid competitive risk against the `clear_pend_core`, `acq_dev_bg_active` should be set before
        // setting REQ bit.
        lock->acq_dev_bg_active = true;
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if ((status & DEV_BG_MASK(dev_handle)) == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    } else {
        // Not the acquiring device: when the status was fully idle before this
        // REQ, nobody was driving the BG, so enable it to serve the request.
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if (status == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    }
}
354
//Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;

    //For this critical section, search `@note 1` in this file, to know details
    portENTER_CRITICAL_SAFE(&s_spinlock);
    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
    portEXIT_CRITICAL_SAFE(&s_spinlock);

    // Check all bits except WEAK_BG: no other LOCK bit and no BG bit means the
    // bus was free and we acquired it immediately.
    if ((status & (BG_MASK | LOCK_MASK)) == 0) {
        //succeed at once
        lock->acquiring_dev = dev_handle;
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        if (status & WEAK_BG_FLAG) {
            //Mainly to disable the cache (Weak_BG), that is not able to disable itself
            bg_disable(lock);
        }
        return true;
    }
    return false;
}
378
/**
 * Find the next acquiring processor according to the status. Will directly change
 * the acquiring device if new one found.
 *
 * Cases:
 * - BG should still be the acquiring processor (Return false):
 *     1. Acquiring device has active BG bits: out_desired_dev = new acquiring device
 *     2. No acquiring device, but BG active: out_desired_dev = randomly pick one device with active BG bits
 * - BG should yield to the task (Return true):
 *     3. Acquiring device has no active BG bits: out_desired_dev = new acquiring device
 *     4. No acquiring device while no active BG bits: out_desired_dev=NULL
 *
 * Acquiring device task need to be resumed only when case 3.
 *
 * This scheduling can happen in either task or ISR, so `in_isr` or `bg_active` not touched.
 *
 * @param lock
 * @param status Current status
 * @param out_desired_dev Desired device to work next, see above.
 *
 * @return False if BG should still be the acquiring processor, otherwise True (yield to task).
 */
IRAM_ATTR static inline bool
schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
{
    spi_bus_lock_dev_t* desired_dev = NULL;
    uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
    // Fold REQ and PEND fields into a single per-device bitmap: a device has
    // outstanding BG work if either of its two bits is set.
    uint32_t bg_bits = status & BG_MASK;
    bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;

    bool bg_yield;
    if (lock_bits) {
        // Some device holds a LOCK bit: it becomes the acquiring device (cases 1 and 3).
        int dev_id = mask_get_id(lock_bits);
        desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

        lock->acquiring_dev = desired_dev;
        // Yield to the task only when the new acquiring device has no BG work left (case 3).
        bg_yield = ((bg_bits & desired_dev->mask) == 0);
        lock->acq_dev_bg_active = !bg_yield;
    } else {
        lock->acq_dev_bg_active = false;
        if (bg_bits) {
            // No acquiring device, but some device has BG requests: BG keeps the role (case 2).
            int dev_id = mask_get_id(bg_bits);
            desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
            BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

            lock->acquiring_dev = NULL;
            bg_yield = false;
        } else {
            // Nothing left to do (case 4).
            desired_dev = NULL;
            lock->acquiring_dev = NULL;
            bg_yield = true;
        }
    }
    *out_desired_dev = desired_dev;
    return bg_yield;
}
436
//Clear the LOCK bit and trigger a rescheduling.
IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    spi_bus_lock_dev_t* desired_dev = NULL;

    //For this critical section, search `@note 1` in this file, to know details
    portENTER_CRITICAL_SAFE(&s_spinlock);
    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
    bool invoke_bg = !schedule_core(lock, status, &desired_dev);
    portEXIT_CRITICAL_SAFE(&s_spinlock);

    if (invoke_bg) {
        // The BG stays/becomes the acquiring processor: hand over via the interrupt.
        bg_enable(lock);
    } else if (desired_dev) {
        // Another device task becomes the acquiring processor: wake it.
        resume_dev(desired_dev);
    } else if (status & WEAK_BG_FLAG) {
        // Nobody needs the bus, but the weak BG (cache) wants to be re-enabled.
        bg_enable(lock);
    }
}
457
// Move the REQ bits to corresponding PEND bits. Must be called by acquiring processor.
// Have no side effects on the acquiring device/processor.
SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
{
    uint32_t active_req_bits = status & REQ_MASK;
    // Shift direction depends on the relative position of the PEND and REQ fields.
#if PENDING_SHIFT > REQ_SHIFT
    uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
#else
    uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
#endif
    // We have to set the PEND bits and then clear the REQ bits, since BG bits are using bitwise OR logic,
    // this will not influence the effectiveness of the BG bits of every device.
    lock_status_fetch_set(lock, pending_mask);
    lock_status_fetch_clear(lock, active_req_bits);
}
473
// Clear the PEND bit (not REQ bit!) of a device, return the suggestion whether we can try to quit the ISR.
// The acquiring processor role is lost immediately when the BG bits for the acquiring device become
// inactive, as indicated by the return value.
// Can be called only when ISR is acting as the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
{
    bool finished;
    spi_bus_lock_t *lock = dev_handle->parent;
    uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
    // The device's PEND bit must be set before it can be cleared.
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);

    uint32_t status = lock_status_clear(lock, pend_mask);

    if (lock->acquiring_dev == dev_handle) {
        // For the acquiring device: finished when no new REQ of it arrived meanwhile.
        finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
        if (finished) {
            lock->acq_dev_bg_active = false;
        }
    } else {
        // For other devices: finished only when the whole status is clear.
        finished = (status == 0);
    }
    return finished;
}
496
// Return false if the ISR has already touched the HW (`in_isr` set), which means previous
// operations should be terminated first, before we use the HW again. Otherwise mark
// `in_isr` and return true.
// `in_isr` stays true until a call to `bg_exit_core` with `wip=false` succeeds.
// NOTE(review): the original comment stated the return value inverted; the code below
// returns false when `in_isr` was already set.
SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
    /*
     * The interrupt is disabled at the entry of ISR to avoid competitive risk as below:
     *
     * The `esp_intr_enable` will be called (b) after new BG request is queued (a) in the task;
     * while `esp_intr_disable` should be called (c) if we check and found the sending queue is empty (d).
     * If (c) happens after (d), if things happens in this sequence:
     * (d) -> (a) -> (b) -> (c), the interrupt will be disabled while there's pending BG request in the queue.
     *
     * To avoid this, interrupt is disabled here, and re-enabled later if required. (c) -> (d) -> (a) -> (b) -> revert (c) if !d
     */
    bg_disable(lock);
    if (lock->in_isr) {
        return false;
    } else {
        lock->in_isr = true;
        return true;
    }
}
521
// Handle the conditions of status and interrupt, avoiding the ISR being disabled when there is any new coming BG requests.
// When called with `wip=true`, means the ISR is performing some operations. Will enable the interrupt again and exit unconditionally.
// When called with `wip=false`, will only return `true` when there is no coming BG request. If return value is `false`, the ISR should try again.
// Will not change acquiring device.
SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
{
    //See comments in `bg_entry_core`, re-enable interrupt disabled in entry if we do need the interrupt
    if (wip) {
        bg_enable(lock);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
        return true;
    }

    bool ret;
    uint32_t status = lock_status_fetch(lock);
    if (lock->acquiring_dev) {
        if (status & DEV_BG_MASK(lock->acquiring_dev)) {
            // The acquiring device still has BG work pending: stay in the ISR.
            BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
            ret = false;
        } else {
            // The request may happen any time, even after we fetched the status.
            // The value of `acq_dev_bg_active` is random.
            resume_dev_in_isr(lock->acquiring_dev, do_yield);
            ret = true;
        }
    } else {
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        // No acquiring device: the ISR may exit only when no BG bit is active at all.
        ret = !(status & BG_MASK);
    }
    if (ret) {
        //when successfully exit, but no transaction done, mark BG as inactive
        lock->in_isr = false;
    }
    return ret;
}
557
// Drain the device's binary semaphore (zero-timeout take), so a stale "give"
// from an earlier round cannot satisfy the upcoming wait.
IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
{
    xSemaphoreTake(dev_handle->semphr, 0);
}
562
dev_wait(spi_bus_lock_dev_t * dev_handle,TickType_t wait)563 SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
564 {
565 BaseType_t ret = xSemaphoreTake(dev_handle->semphr, wait);
566
567 if (ret == pdFALSE) return ESP_ERR_TIMEOUT;
568 return ESP_OK;
569 }
570
571 /*******************************************************************************
572 * Initialization & Deinitialization
573 ******************************************************************************/
spi_bus_init_lock(spi_bus_lock_handle_t * out_lock,const spi_bus_lock_config_t * config)574 esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
575 {
576 spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
577 if (lock == NULL) {
578 return ESP_ERR_NO_MEM;
579 }
580
581 lock_status_init(lock);
582 lock->acquiring_dev = NULL;
583 lock->last_dev = NULL;
584 lock->periph_cs_num = config->cs_num;
585 lock->host_id = config->host_id;
586
587 *out_lock = lock;
588 return ESP_OK;
589 }
590
/**
 * Delete a bus lock created by `spi_bus_init_lock`.
 *
 * All devices must have been unregistered first. A NULL handle is accepted as
 * a no-op (the original would dereference NULL in the loop before reaching
 * free()).
 */
void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
{
    if (lock == NULL) {
        return;
    }
    for (int i = 0; i < DEV_NUM_MAX; i++) {
        assert(atomic_load(&lock->dev[i]) == (intptr_t)NULL);
    }
    free(lock);
}
598
// Claim a free device slot by CAS-ing its entry from NULL to 1 (the real
// pointer is stored later by the caller). Devices that need a HW CS pin scan
// upward over the CS-capable slots [0, periph_cs_num); others scan downward
// from DEV_NUM_MAX-1 to keep the CS slots available.
// Returns the claimed slot index, or -1 when no slot is free.
static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
{
    if (cs_required) {
        for (int slot = 0; slot < lock->periph_cs_num; slot++) {
            intptr_t expected = (intptr_t)NULL;
            if (atomic_compare_exchange_strong(&lock->dev[slot], &expected, (intptr_t)1)) {
                return slot;
            }
        }
        return -1;
    }

    for (int slot = DEV_NUM_MAX - 1; slot >= 0; slot--) {
        intptr_t expected = (intptr_t)NULL;
        if (atomic_compare_exchange_strong(&lock->dev[slot], &expected, (intptr_t)1)) {
            return slot;
        }
    }
    return -1;
}
623
spi_bus_lock_register_dev(spi_bus_lock_handle_t lock,spi_bus_lock_dev_config_t * config,spi_bus_lock_dev_handle_t * out_dev_handle)624 esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
625 spi_bus_lock_dev_handle_t *out_dev_handle)
626 {
627 if (lock == NULL) return ESP_ERR_INVALID_ARG;
628 int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
629 if (id == -1) return ESP_ERR_NOT_SUPPORTED;
630
631 spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
632 if (dev_lock == NULL) {
633 return ESP_ERR_NO_MEM;
634 }
635 dev_lock->semphr = xSemaphoreCreateBinary();
636 if (dev_lock->semphr == NULL) {
637 free(dev_lock);
638 atomic_store(&lock->dev[id], (intptr_t)NULL);
639 return ESP_ERR_NO_MEM;
640 }
641 dev_lock->parent = lock;
642 dev_lock->mask = DEV_MASK(id);
643
644 ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
645 atomic_store(&lock->dev[id], (intptr_t)dev_lock);
646 *out_dev_handle = dev_lock;
647 return ESP_OK;
648 }
649
// Unregister a device: free its slot in the parent lock, delete its semaphore
// and release the handle memory.
void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
{
    spi_bus_lock_t *lock = dev_handle->parent;
    const int id = dev_lock_get_id(dev_handle);
    BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&lock->dev[id]) == (intptr_t)dev_handle);

    // Drop the cached "last used device" hint if it points at this device.
    if (lock->last_dev == dev_handle) {
        lock->last_dev = NULL;
    }

    // Free the slot first, then tear down the device's resources.
    atomic_store(&lock->dev[id], (intptr_t)NULL);
    if (dev_handle->semphr) {
        vSemaphoreDelete(dev_handle->semphr);
    }
    free(dev_handle);
}
666
mask_get_id(uint32_t mask)667 IRAM_ATTR static inline int mask_get_id(uint32_t mask)
668 {
669 return ID_DEV_MASK(mask);
670 }
671
// Get the numeric slot id of a device from its REQ/PEND/LOCK bit mask.
IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
{
    return mask_get_id(dev_lock->mask);
}
676
// Install the callbacks used to enable/disable the background operation
// (SPI interrupt or cache); `arg` is passed to both callbacks.
void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
{
    lock->bg_enable = bg_enable;
    lock->bg_disable = bg_disable;
    lock->bg_arg = arg;
}
683
spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)684 IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
685 {
686 return (dev_handle? dev_lock_get_id(dev_handle): -1);
687 }
688
689 //will be called when cache disabled
spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)690 IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
691 {
692 spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
693 dev_handle->parent->last_dev = dev_handle;
694 if (last_dev != dev_handle) {
695 int last_dev_id = (last_dev? dev_lock_get_id(last_dev): -1);
696 ESP_DRAM_LOGV(TAG, "SPI dev changed from %d to %d",
697 last_dev_id, dev_lock_get_id(dev_handle));
698 }
699 return (dev_handle != last_dev);
700 }
701
702 /*******************************************************************************
703 * Acquiring service
704 ******************************************************************************/
// Acquire the bus for exclusive use by `dev_handle`; blocks on the device's
// semaphore until this device becomes the acquiring processor.
// Only `wait == portMAX_DELAY` is supported.
IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    ESP_RETURN_ON_FALSE_ISR(wait == portMAX_DELAY, ESP_ERR_INVALID_ARG, TAG, "timeout other than portMAX_DELAY not supported");

    spi_bus_lock_t* lock = dev_handle->parent;

    // Clear the semaphore before checking
    dev_wait_prepare(dev_handle);
    if (!acquire_core(dev_handle)) {
        //block until becoming the acquiring processor (help by previous acquiring processor)
        esp_err_t err = dev_wait(dev_handle, wait);
        //TODO: add timeout handling here.
        if (err != ESP_OK) return err;
    }

    ESP_DRAM_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);

    //When arrives at here, requests of this device should already be handled
    uint32_t status = lock_status_fetch(lock);
    (void) status;
    BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);

    return ESP_OK;
}
730
spi_bus_lock_acquire_end(spi_bus_lock_dev_t * dev_handle)731 IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
732 {
733 //release the bus
734 spi_bus_lock_t* lock = dev_handle->parent;
735 ESP_RETURN_ON_FALSE_ISR(lock->acquiring_dev == dev_handle, ESP_ERR_INVALID_STATE, TAG, "Cannot release a lock that hasn't been acquired.");
736
737 acquire_end_core(dev_handle);
738
739 ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
740 return ESP_OK;
741 }
742
spi_bus_lock_get_acquiring_dev(spi_bus_lock_t * lock)743 SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
744 {
745 return lock->acquiring_dev;
746 }
747
748 /*******************************************************************************
749 * BG (background operation) service
750 ******************************************************************************/
spi_bus_lock_bg_entry(spi_bus_lock_t * lock)751 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
752 {
753 return bg_entry_core(lock);
754 }
755
spi_bus_lock_bg_exit(spi_bus_lock_t * lock,bool wip,BaseType_t * do_yield)756 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
757 {
758 return bg_exit_core(lock, wip, do_yield);
759 }
760
spi_bus_lock_bg_request(spi_bus_lock_dev_t * dev_handle)761 SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
762 {
763 req_core(dev_handle);
764 return ESP_OK;
765 }
766
spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle,TickType_t wait)767 IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
768 {
769 spi_bus_lock_t *lock = dev_handle->parent;
770
771 ESP_RETURN_ON_FALSE_ISR(lock->acquiring_dev == dev_handle, ESP_ERR_INVALID_STATE, TAG, "Cannot wait for a device that is not acquired");
772 ESP_RETURN_ON_FALSE_ISR(wait == portMAX_DELAY, ESP_ERR_INVALID_ARG, TAG, "timeout other than portMAX_DELAY not supported");
773
774 // If no BG bits active, skip quickly. This is ensured by `spi_bus_lock_wait_bg_done`
775 // cannot be executed with `bg_request` on the same device concurrently.
776 if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
777 // Clear the semaphore before checking
778 dev_wait_prepare(dev_handle);
779 if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
780 //block until becoming the acquiring processor (help by previous acquiring processor)
781 esp_err_t err = dev_wait(dev_handle, wait);
782 //TODO: add timeout handling here.
783 if (err != ESP_OK) return err;
784 }
785 }
786
787 BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
788 BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
789 return ESP_OK;
790 }
791
spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t * dev_handle)792 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
793 {
794 bool finished = clear_pend_core(dev_handle);
795 ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
796 return finished;
797 }
798
spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t * lock,spi_bus_lock_dev_handle_t * out_dev_lock)799 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
800 spi_bus_lock_dev_handle_t *out_dev_lock)
801 {
802 BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
803 uint32_t status = lock_status_fetch(lock);
804 return schedule_core(lock, status, out_dev_lock);
805 }
806
spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t * dev_lock)807 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
808 {
809 spi_bus_lock_t* lock = dev_lock->parent;
810 uint32_t status = lock_status_fetch(lock);
811 uint32_t dev_status = status & dev_lock->mask;
812
813 // move REQ bits of all device to corresponding PEND bits.
814 // To reduce executing time, only done when the REQ bit of the calling device is set.
815 if (dev_status & REQ_MASK) {
816 update_pend_core(lock, status);
817 return true;
818 } else {
819 return dev_status & PEND_MASK;
820 }
821 }
822
spi_bus_lock_bg_req_exist(spi_bus_lock_t * lock)823 SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
824 {
825 uint32_t status = lock_status_fetch(lock);
826 return status & BG_MASK;
827 }
828
829 /*******************************************************************************
830 * Static variables of the locks of the main flash
831 ******************************************************************************/
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
// Forward declaration: the device slot of the main flash, defined below.
static spi_bus_lock_dev_t lock_main_flash_dev;

// Statically-initialized lock for the main (SPI1) bus, shared with the flash cache.
static spi_bus_lock_t main_spi_bus_lock = {
    /*
     * the main bus cache is permanently required, this flag is set here and never clear so that the
     * cache will always be enabled if acquiring devices yield.
     */
    .status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
    .acquiring_dev = NULL,
    // Device slot 0 is pre-assigned to the main flash device.
    .dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
    .new_req = 0,
    .periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
};
const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;
847
spi_bus_lock_init_main_bus(void)848 esp_err_t spi_bus_lock_init_main_bus(void)
849 {
850 spi_bus_main_set_lock(g_main_spi_bus_lock);
851 return ESP_OK;
852 }
853
// Static storage for the main flash device's binary semaphore (created in
// spi_bus_lock_init_main_dev).
static StaticSemaphore_t main_flash_semphr;

// Device slot of the main flash on the main bus lock; the semaphore is filled
// in later by spi_bus_lock_init_main_dev.
static spi_bus_lock_dev_t lock_main_flash_dev = {
    .semphr = NULL,
    .parent = &main_spi_bus_lock,
    .mask = DEV_MASK(0),
};
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;
862
spi_bus_lock_init_main_dev(void)863 esp_err_t spi_bus_lock_init_main_dev(void)
864 {
865 g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
866 if (g_spi_lock_main_flash_dev->semphr == NULL) {
867 return ESP_ERR_NO_MEM;
868 }
869 return ESP_OK;
870 }
#else //CONFIG_SPI_FLASH_SHARE_SPI1_BUS

//when the dev lock is not initialized, point to NULL
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = NULL;

#endif
877