1 /*
2  * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdarg.h>
8 #include <sys/param.h>  //For max/min
9 #include "esp_attr.h"
10 #include "esp_private/system_internal.h"
11 #include "esp_spi_flash.h"   //for ``g_flash_guard_default_ops``
12 #include "esp_flash.h"
13 #include "esp_flash_partitions.h"
14 #include "freertos/FreeRTOS.h"
15 #include "freertos/task.h"
16 #include "hal/spi_types.h"
17 #include "sdkconfig.h"
18 #include "esp_log.h"
19 #include "esp_compiler.h"
20 #include "esp_rom_sys.h"
21 
22 #include "driver/spi_common_internal.h"
23 
24 static const char TAG[] = "spi_flash";
25 
/*
 * OS functions providing delay service and arbitration among chips, and with the cache.
 *
 * The cache needs to be disabled while chips on the SPI1 bus are under operation, hence these
 * functions need to be put into IRAM, and their data should be put into DRAM.
 */
32 
// Per-device context shared by all OS function sets. Holds the bus-lock device
// handle used for arbitration; NULL when the bus lock is not used (SPI1 without
// CONFIG_SPI_FLASH_SHARE_SPI1_BUS).
typedef struct {
    spi_bus_lock_dev_handle_t dev_lock;
} app_func_arg_t;
36 
/*
 * Time yield algorithm:
 * Every time spi_flash_os_check_yield() is called:
 *
 * 1. If the time since last end() function is longer than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS (time
 *     to yield), all counters will be reset, as if the yield has just ended;
 * 2. If the time since last yield() is longer than CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS, will
 *    return a yield request. When the yield() is called, all counters will be reset.
 * Note: Short intervals between start() and end() after the last yield() will not reset the
 *       counter mentioned in #2, but still be counted into the time mentioned in #2.
 */
// Extended context for chips on the SPI1 bus (shared with the cache).
typedef struct {
    app_func_arg_t common_arg; //shared args, must be the first item
    bool no_protect;    //to decide whether to check protected region (for the main chip) or not.
    uint32_t acquired_since_us;    // Time since last explicit yield()
    uint32_t released_since_us;    // Time since last end() (implicit yield)
} spi1_app_func_arg_t;
54 
// Forward declarations for the SPI1 yield-bookkeeping hooks (defined at the end of this file).
static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx);
59 
// Re-enable the cache (and interrupts) after a flash operation, via the default
// flash guard. No-op when CONFIG_SPI_FLASH_AUTO_SUSPEND is set, since the cache
// is then not disabled for flash operations.
IRAM_ATTR static void cache_enable(void* arg)
{
#ifndef CONFIG_SPI_FLASH_AUTO_SUSPEND
    g_flash_guard_default_ops.end();
#endif
}
66 
// Disable the cache (and interrupts) before a flash operation, via the default
// flash guard. No-op when CONFIG_SPI_FLASH_AUTO_SUSPEND is set.
IRAM_ATTR static void cache_disable(void* arg)
{
#ifndef CONFIG_SPI_FLASH_AUTO_SUSPEND
    g_flash_guard_default_ops.start();
#endif
}
73 
spi_start(void * arg)74 static IRAM_ATTR esp_err_t spi_start(void *arg)
75 {
76     spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t *)arg)->dev_lock;
77 
78     // wait for other devices (or cache) to finish their operation
79     esp_err_t ret = spi_bus_lock_acquire_start(dev_lock, portMAX_DELAY);
80     if (ret != ESP_OK) {
81         return ret;
82     }
83     spi_bus_lock_touch(dev_lock);
84     return ESP_OK;
85 }
86 
spi_end(void * arg)87 static IRAM_ATTR esp_err_t spi_end(void *arg)
88 {
89     return spi_bus_lock_acquire_end(((app_func_arg_t *)arg)->dev_lock);
90 }
91 
// Called before an operation on the SPI1 bus (shared with the cache).
// With CONFIG_SPI_FLASH_SHARE_SPI1_BUS the bus lock handles cache/interrupt
// disabling; otherwise the cache is disabled directly here.
static IRAM_ATTR esp_err_t spi1_start(void *arg)
{
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    //use the lock to disable the cache and interrupts before using the SPI bus
    return spi_start(arg);
#else
    //directly disable the cache and interrupts when lock is not used
    cache_disable(NULL);
    on_spi1_acquired((spi1_app_func_arg_t*)arg);
    return ESP_OK;
#endif
}
104 
spi1_end(void * arg)105 static IRAM_ATTR esp_err_t spi1_end(void *arg)
106 {
107     esp_err_t ret = ESP_OK;
108 #if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
109     ret = spi_end(arg);
110 #else
111     cache_enable(NULL);
112 #endif
113     on_spi1_released((spi1_app_func_arg_t*)arg);
114     return ret;
115 }
116 
spi1_flash_os_check_yield(void * arg,uint32_t chip_status,uint32_t * out_request)117 static IRAM_ATTR esp_err_t spi1_flash_os_check_yield(void *arg, uint32_t chip_status, uint32_t* out_request)
118 {
119     assert (chip_status == 0);  //TODO: support suspend
120     esp_err_t ret = ESP_ERR_TIMEOUT;    //Nothing happened
121     uint32_t request = 0;
122 
123     if (on_spi1_check_yield((spi1_app_func_arg_t *)arg)) {
124         request = SPI_FLASH_YIELD_REQ_YIELD;
125         ret = ESP_OK;
126     }
127     if (out_request) {
128         *out_request = request;
129     }
130     return ret;
131 }
132 
// Yield to other tasks during a long flash operation: delay for
// CONFIG_SPI_FLASH_ERASE_YIELD_TICKS (or 1 tick) when the scheduler is
// running, then reset the yield bookkeeping counters.
static IRAM_ATTR esp_err_t spi1_flash_os_yield(void *arg, uint32_t* out_status)
{
    if (likely(xTaskGetSchedulerState() == taskSCHEDULER_RUNNING)) {
#ifdef CONFIG_SPI_FLASH_ERASE_YIELD_TICKS
        vTaskDelay(CONFIG_SPI_FLASH_ERASE_YIELD_TICKS);
#else
        vTaskDelay(1);
#endif
    }
    on_spi1_yielded((spi1_app_func_arg_t*)arg);
    return ESP_OK;
}
145 
// Busy-wait delay callback used by the flash driver for short waits.
static IRAM_ATTR esp_err_t delay_us(void *arg, uint32_t us)
{
    esp_rom_delay_us(us);
    return ESP_OK;
}
151 
get_buffer_malloc(void * arg,size_t reqest_size,size_t * out_size)152 static IRAM_ATTR void* get_buffer_malloc(void* arg, size_t reqest_size, size_t* out_size)
153 {
154     /* Allocate temporary internal buffer to use for the actual read. If the preferred size
155         doesn't fit in free internal memory, allocate the largest available free block.
156 
157         (May need to shrink read_chunk_size and retry due to race conditions with other tasks
158         also allocating from the heap.)
159     */
160     void* ret = NULL;
161     unsigned retries = 5;
162     size_t read_chunk_size = reqest_size;
163     while(ret == NULL && retries--) {
164         read_chunk_size = MIN(read_chunk_size, heap_caps_get_largest_free_block(MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT));
165         read_chunk_size = (read_chunk_size + 3) & ~3;
166         ret = heap_caps_malloc(read_chunk_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
167     }
168     ESP_LOGV(TAG, "allocate temp buffer: %p (%d)", ret, read_chunk_size);
169     *out_size = (ret != NULL? read_chunk_size: 0);
170     return ret;
171 }
172 
// Free a temporary buffer obtained from get_buffer_malloc().
static IRAM_ATTR void release_buffer_malloc(void* arg, void *temp_buf)
{
    free(temp_buf);
}
177 
main_flash_region_protected(void * arg,size_t start_addr,size_t size)178 static IRAM_ATTR esp_err_t main_flash_region_protected(void* arg, size_t start_addr, size_t size)
179 {
180     if (((spi1_app_func_arg_t*)arg)->no_protect || esp_partition_main_flash_region_safe(start_addr, size)) {
181         //ESP_OK = 0, also means protected==0
182         return ESP_OK;
183     } else {
184         return ESP_ERR_NOT_SUPPORTED;
185     }
186 }
187 
// Context for the main flash chip; filled in by esp_flash_app_enable_os_functions().
// Placed in DRAM because it is accessed while the cache is disabled.
static DRAM_ATTR spi1_app_func_arg_t main_flash_arg = {};

//for SPI1, we have to disable the cache and interrupts before using the SPI bus
static const DRAM_ATTR esp_flash_os_functions_t esp_flash_spi1_default_os_functions = {
    .start = spi1_start,
    .end = spi1_end,
    .region_protected = main_flash_region_protected,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .check_yield = spi1_flash_os_check_yield,
    .yield = spi1_flash_os_yield,
};

// OS hooks for chips on SPI2/SPI3: plain bus locking is enough, so no
// region-protection or yield callbacks are installed.
static const esp_flash_os_functions_t esp_flash_spi23_default_os_functions = {
    .start = spi_start,
    .end = spi_end,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .region_protected = NULL,
    .check_yield = NULL,
    .yield = NULL,
};
212 
register_dev(int host_id)213 static spi_bus_lock_dev_handle_t register_dev(int host_id)
214 {
215     spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
216     spi_bus_lock_dev_handle_t dev_handle;
217     spi_bus_lock_dev_config_t config = {.flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED};
218     esp_err_t err = spi_bus_lock_register_dev(lock, &config, &dev_handle);
219     if (err != ESP_OK) {
220         return NULL;
221     }
222     return dev_handle;
223 }
224 
esp_flash_init_os_functions(esp_flash_t * chip,int host_id,int * out_dev_id)225 esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, int* out_dev_id)
226 {
227     spi_bus_lock_dev_handle_t dev_handle = NULL;
228 
229     // Skip initializing the bus lock when the bus is SPI1 and the bus is not shared with SPI Master
230     // driver, leaving dev_handle = NULL
231     bool skip_register_dev = (host_id == SPI1_HOST);
232 #if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
233     skip_register_dev = false;
234 #endif
235     if (!skip_register_dev) {
236         dev_handle = register_dev(host_id);
237     }
238 
239     if (host_id == SPI1_HOST) {
240         //SPI1
241         chip->os_func = &esp_flash_spi1_default_os_functions;
242         chip->os_func_data = heap_caps_malloc(sizeof(spi1_app_func_arg_t),
243                                          MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
244         if (chip->os_func_data == NULL) {
245             return ESP_ERR_NO_MEM;
246         }
247         *(spi1_app_func_arg_t*) chip->os_func_data = (spi1_app_func_arg_t) {
248             .common_arg = {
249                 .dev_lock = dev_handle,
250             },
251             .no_protect = true,
252         };
253     } else if (host_id == SPI2_HOST || host_id == SPI3_HOST) {
254         //SPI2, SPI3
255         chip->os_func = &esp_flash_spi23_default_os_functions;
256         chip->os_func_data = heap_caps_malloc(sizeof(app_func_arg_t),
257                                          MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
258         if (chip->os_func_data == NULL) {
259             return ESP_ERR_NO_MEM;
260         }
261         *(app_func_arg_t*) chip->os_func_data = (app_func_arg_t) {
262                 .dev_lock = dev_handle,
263         };
264     } else {
265         return ESP_ERR_INVALID_ARG;
266     }
267 
268     // Bus lock not initialized, the device ID should be directly given by application.
269     if (dev_handle) {
270         *out_dev_id = spi_bus_lock_get_dev_id(dev_handle);
271     }
272 
273     return ESP_OK;
274 }
275 
esp_flash_deinit_os_functions(esp_flash_t * chip)276 esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip)
277 {
278     if (chip->os_func_data) {
279         spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t*)chip->os_func_data)->dev_lock;
280         // SPI bus lock is possible not used on SPI1 bus
281         if (dev_lock) {
282             spi_bus_lock_unregister_dev(dev_lock);
283         }
284         free(chip->os_func_data);
285     }
286     chip->os_func = NULL;
287     chip->os_func_data = NULL;
288     return ESP_OK;
289 }
290 
// Initialize the bus lock of the main (SPI1) bus and hook the cache
// enable/disable callbacks into it as the background control. Returns
// ESP_ERR_NOT_SUPPORTED when the SPI1 bus is not shared with the SPI Master
// driver (the lock functions are then not even linked in).
esp_err_t esp_flash_init_main_bus_lock(void)
{
    /* The following called functions are only defined if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
     * is set. Thus, we must not call them if the macro is not defined, else the linker
     * would trigger errors. */
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    spi_bus_lock_init_main_bus();
    spi_bus_lock_set_bg_control(g_main_spi_bus_lock, cache_enable, cache_disable, NULL);

    esp_err_t err = spi_bus_lock_init_main_dev();
    if (err != ESP_OK) {
        return err;
    }
    return ESP_OK;
#else
    return ESP_ERR_NOT_SUPPORTED;
#endif
}
309 
esp_flash_app_enable_os_functions(esp_flash_t * chip)310 esp_err_t esp_flash_app_enable_os_functions(esp_flash_t* chip)
311 {
312     main_flash_arg = (spi1_app_func_arg_t) {
313         .common_arg = {
314             .dev_lock = g_spi_lock_main_flash_dev,   //for SPI1,
315         },
316         .no_protect = false,
317     };
318     chip->os_func = &esp_flash_spi1_default_os_functions;
319     chip->os_func_data = &main_flash_arg;
320     return ESP_OK;
321 }
322 
// The goal of this part is to manually insert one valid task execution interval, if the time since
// last valid interval exceed the limitation (CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS).
//
// Valid task execution interval: continuous time with the cache enabled, which is longer than
// CONFIG_SPI_FLASH_ERASE_YIELD_TICKS. Yield time shorter than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS is
// not treated as valid interval.
//
// Returns true when a yield is due; always false when
// CONFIG_SPI_FLASH_YIELD_DURING_ERASE is disabled.
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    uint32_t time = esp_system_get_time();
    // We handle the reset here instead of in `on_spi1_acquired()`, when acquire() and release() is
    // larger than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, to save one `esp_system_get_time()` call
    if ((time - ctx->released_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_TICKS * portTICK_PERIOD_MS * 1000) {
        // Reset the acquired time as if the yield has just happened.
        ctx->acquired_since_us = time;
    } else if ((time - ctx->acquired_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS * 1000) {
        return true;
    }
#endif
    return false;
}
// Record the time at which the SPI1 bus was released (end of operation);
// used by on_spi1_check_yield() to detect an implicit yield.
static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    ctx->released_since_us = esp_system_get_time();
#endif
}
350 
// Intentionally empty: the acquired-time reset is folded into
// on_spi1_check_yield() to save a timer read. See the comment below.
static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx)
{
    // Ideally, when the time after `on_spi1_released()` before this function is called is larger
    // than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, the acquired time should be reset. We assume the
    // time after `on_spi1_check_yield()` before this function is so short that we can do the reset
    // in that function instead.
}
358 
on_spi1_yielded(spi1_app_func_arg_t * ctx)359 static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx)
360 {
361     uint32_t time = esp_system_get_time();
362     ctx->acquired_since_us = time;
363 }
364