1 /*
2  * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdarg.h>
8 #include <sys/param.h>  //For max/min
9 #include "esp_attr.h"
10 #include "esp_private/system_internal.h"
11 #include "esp_flash.h"
12 #include "esp_flash_partitions.h"
13 #include "hal/spi_types.h"
14 #include "sdkconfig.h"
15 #include "esp_log.h"
16 #include "esp_compiler.h"
17 #include "esp_rom_sys.h"
18 #include "esp_private/spi_flash_os.h"
19 #include "esp_private/cache_utils.h"
20 
21 #include "esp_private/spi_common_internal.h"
22 
23 #include <zephyr/kernel.h>
24 
25 #define SPI_FLASH_CACHE_NO_DISABLE  (CONFIG_SPI_FLASH_AUTO_SUSPEND || (CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_RODATA) || CONFIG_APP_BUILD_TYPE_RAM)
26 static const char TAG[] = "spi_flash";
27 
28 #if SPI_FLASH_CACHE_NO_DISABLE
29 K_MUTEX_DEFINE(s_spi1_flash_mutex);
30 #endif  //  #if SPI_FLASH_CACHE_NO_DISABLE
31 
32 /*
33  * OS functions providing delay service and arbitration among chips, and with the cache.
34  *
 * The cache needs to be disabled when chips on the SPI1 bus are under operation, hence these functions need to be put
 * into the IRAM, and their data should be put into the DRAM.
37  */
38 
39 /*
40  * Time yield algorithm:
41  * Every time spi_flash_os_check_yield() is called:
42  *
43  * 1. If the time since last end() function is longer than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS (time
 *     to yield), all counters will be reset, as if the yield had just ended;
45  * 2. If the time since last yield() is longer than CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS, will
46  *    return a yield request. When the yield() is called, all counters will be reset.
47  * Note: Short intervals between start() and end() after the last yield() will not reset the
48  *       counter mentioned in #2, but still be counted into the time mentioned in #2.
49  */
50 typedef struct {
51     spi_bus_lock_dev_handle_t dev_lock;
52     bool no_protect;    //to decide whether to check protected region (for the main chip) or not.
53     uint32_t acquired_since_us;    // Time since last explicit yield()
54     uint32_t released_since_us;    // Time since last end() (implicit yield)
55 } app_func_arg_t;
56 
57 static inline void on_spi_released(app_func_arg_t* ctx);
58 static inline void on_spi_acquired(app_func_arg_t* ctx);
59 static inline void on_spi_yielded(app_func_arg_t* ctx);
60 static inline bool on_spi_check_yield(app_func_arg_t* ctx);
61 
62 #if !SPI_FLASH_CACHE_NO_DISABLE
IRAM_ATTR static void cache_enable(void* arg)
{
    // Re-enable the flash cache and interrupts on all CPUs after an SPI1
    // operation; counterpart of cache_disable(). `arg` is unused.
    spi_flash_enable_interrupts_caches_and_other_cpu();
}
67 
IRAM_ATTR static void cache_disable(void* arg)
{
    // Disable the flash cache and interrupts on all CPUs so SPI0 cannot touch
    // the flash while SPI1 drives it; counterpart of cache_enable(). `arg` is unused.
    spi_flash_disable_interrupts_caches_and_other_cpu();
}
72 #endif  //#if !SPI_FLASH_CACHE_NO_DISABLE
73 
static IRAM_ATTR esp_err_t spi1_start(void *arg)
{
    esp_err_t ret = ESP_OK;
    /**
     * There are three ways for ESP Flash API lock:
     * 1. spi bus lock, this is used when SPI1 is shared with GPSPI Master Driver (not yet supported in Zephyr)
     * 2. mutex, this is used when the Cache doesn't need to be disabled.
     * 3. cache lock (from cache_utils.h), this is used when we need to disable Cache to avoid access from SPI0
     *
     * From 1 to 3, the lock efficiency decreases.
     */
#if SPI_FLASH_CACHE_NO_DISABLE
    // Cache stays enabled: a plain mutex is enough to serialize SPI1 users.
    k_mutex_lock(&s_spi1_flash_mutex, K_FOREVER);
#else
    //directly disable the cache and interrupts when lock is not used
    cache_disable(NULL);
#endif
    // Record the acquire time for the yield bookkeeping (no-op unless
    // CONFIG_SPI_FLASH_YIELD_DURING_ERASE is enabled).
    on_spi_acquired((app_func_arg_t*)arg);
    return ret;
}
94 
static IRAM_ATTR esp_err_t spi1_end(void *arg)
{
    esp_err_t ret = ESP_OK;

    /**
     * There are three ways for ESP Flash API lock, see `spi1_start`.
     * This releases whichever lock `spi1_start` took.
     */
#if SPI_FLASH_CACHE_NO_DISABLE
    k_mutex_unlock(&s_spi1_flash_mutex);
#else
    cache_enable(NULL);
#endif
    // Record the release time for the yield bookkeeping (implicit yield point).
    on_spi_released((app_func_arg_t*)arg);
    return ret;
}
110 
spi_flash_os_check_yield(void * arg,uint32_t chip_status,uint32_t * out_request)111 static IRAM_ATTR esp_err_t spi_flash_os_check_yield(void *arg, uint32_t chip_status, uint32_t* out_request)
112 {
113     assert (chip_status == 0);  //TODO: support suspend
114     esp_err_t ret = ESP_ERR_TIMEOUT;    //Nothing happened
115     uint32_t request = 0;
116 
117     if (on_spi_check_yield((app_func_arg_t *)arg)) {
118         request = SPI_FLASH_YIELD_REQ_YIELD;
119         ret = ESP_OK;
120     }
121     if (out_request) {
122         *out_request = request;
123     }
124     return ret;
125 }
126 
static IRAM_ATTR esp_err_t spi_flash_os_yield(void *arg, uint32_t* out_status)
{
    // Give other tasks CPU time during long erase operations. Sleeping is only
    // legal once the kernel scheduler is up, hence the pre-kernel guard.
    if (likely(!k_is_pre_kernel())) {
#ifdef CONFIG_SPI_FLASH_ERASE_YIELD_TICKS
        k_sleep(K_TICKS(CONFIG_SPI_FLASH_ERASE_YIELD_TICKS));
#else
        // Minimal yield: sleep one tick when no duration is configured.
        k_sleep(K_TICKS(1));
#endif
    }
    // Reset the yield timing counters; `out_status` is unused (no suspend support).
    on_spi_yielded((app_func_arg_t*)arg);
    return ESP_OK;
}
139 
static IRAM_ATTR esp_err_t delay_us(void *arg, uint32_t us)
{
    // Busy-wait delay callback for the flash driver; `arg` is unused.
    esp_rom_delay_us(us);
    return ESP_OK;
}
145 
get_buffer_malloc(void * arg,size_t reqest_size,size_t * out_size)146 static IRAM_ATTR void* get_buffer_malloc(void* arg, size_t reqest_size, size_t* out_size)
147 {
148     /* Allocate temporary internal buffer to use for the actual read. If the preferred size
149         doesn't fit in free internal memory, allocate the largest available free block.
150 
151         (May need to shrink read_chunk_size and retry due to race conditions with other tasks
152         also allocating from the heap.)
153     */
154     unsigned retries = 5;
155     size_t read_chunk_size = reqest_size;
156     void* ret = k_malloc(read_chunk_size);
157     while(ret == NULL && retries-- && read_chunk_size > 0) {
158         read_chunk_size = (read_chunk_size / 2 + 3) & ~3;
159         ret = k_malloc(read_chunk_size);
160     }
161     ESP_LOGV(TAG, "allocate temp buffer: %p (%d)", ret, read_chunk_size);
162     *out_size = (ret != NULL? read_chunk_size: 0);
163     return ret;
164 }
165 
static IRAM_ATTR void release_buffer_malloc(void* arg, void *temp_buf)
{
    // Free a buffer obtained from get_buffer_malloc(); `arg` is unused.
    // k_free(NULL) is a no-op, so a NULL temp_buf is safe.
    k_free(temp_buf);
}
170 
static IRAM_ATTR esp_err_t main_flash_region_protected(void* arg, size_t start_addr, size_t size)
{
    // Stub: always allows the operation. In upstream ESP-IDF this callback
    // rejects writes to protected partitions for the main flash chip;
    // NOTE(review): presumably the Zephyr port delegates that check elsewhere
    // (e.g. the flash map / partition layer) — confirm before relying on it.
    return ESP_OK;
}
175 
176 #if CONFIG_SPI_FLASH_BROWNOUT_RESET
main_flash_op_status(uint32_t op_status)177 static IRAM_ATTR void main_flash_op_status(uint32_t op_status)
178 {
179     bool is_erasing = op_status & SPI_FLASH_OS_IS_ERASING_STATUS_FLAG;
180     spi_flash_set_erasing_flag(is_erasing);
181 }
182 #endif
183 
// Shared callback context for the main flash chip. DRAM_ATTR keeps it
// reachable while the cache is disabled; populated by
// esp_flash_app_enable_os_functions().
static DRAM_ATTR app_func_arg_t main_flash_arg = {};

//for SPI1, we have to disable the cache and interrupts before using the SPI bus
static const DRAM_ATTR esp_flash_os_functions_t esp_flash_spi1_default_os_functions = {
    .start = spi1_start,
    .end = spi1_end,
    .region_protected = main_flash_region_protected,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .check_yield = spi_flash_os_check_yield,
    .yield = spi_flash_os_yield,
#if CONFIG_SPI_FLASH_BROWNOUT_RESET
    .set_flash_op_status = main_flash_op_status,
#else
    .set_flash_op_status = NULL,
#endif
};
202 
use_bus_lock(int host_id)203 static bool use_bus_lock(int host_id)
204 {
205     if (host_id != SPI1_HOST) {
206         return true;
207     }
208 
209     return false;
210 }
211 
212 // This function is only called by users usually via `spi_bus_add_flash_device` to initialise os functions.
213 // System will initialise them via `esp_flash_app_enable_os_functions`
esp_flash_init_os_functions(esp_flash_t * chip,int host_id,spi_bus_lock_dev_handle_t dev_handle)214 esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, spi_bus_lock_dev_handle_t dev_handle)
215 {
216     if (use_bus_lock(host_id) && !dev_handle) {
217         return ESP_ERR_INVALID_ARG;
218     }
219 
220     chip->os_func_data = k_malloc(sizeof(app_func_arg_t));
221     if (chip->os_func_data == NULL) {
222         return ESP_ERR_NO_MEM;
223     }
224 
225     switch (host_id) {
226         case SPI1_HOST:
227             chip->os_func = &esp_flash_spi1_default_os_functions;
228             break;
229         default:
230             return ESP_ERR_INVALID_ARG;
231             break;
232     }
233 
234     *(app_func_arg_t*) chip->os_func_data = (app_func_arg_t) {
235         .dev_lock = dev_handle,
236         .no_protect = true, // This is OK because this code path isn't used for the main flash chip which requires `no_protect = false`
237     };
238 
239     return ESP_OK;
240 }
241 
esp_flash_deinit_os_functions(esp_flash_t * chip,spi_bus_lock_dev_handle_t * out_dev_handle)242 esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip, spi_bus_lock_dev_handle_t* out_dev_handle)
243 {
244     if (chip->os_func_data) {
245         // SPI bus lock is possibly not used on SPI1 bus
246         *out_dev_handle = ((app_func_arg_t*)chip->os_func_data)->dev_lock;
247         k_free(chip->os_func_data);
248     }
249     chip->os_func = NULL;
250     chip->os_func_data = NULL;
251     return ESP_OK;
252 }
253 
esp_flash_app_enable_os_functions(esp_flash_t * chip)254 esp_err_t esp_flash_app_enable_os_functions(esp_flash_t* chip)
255 {
256     main_flash_arg = (app_func_arg_t) {
257         .dev_lock = 0,
258         .no_protect = false, // Required for the main flash chip
259     };
260     chip->os_func = &esp_flash_spi1_default_os_functions;
261     chip->os_func_data = &main_flash_arg;
262     return ESP_OK;
263 }
264 
265 // The goal of this part is to manually insert one valid task execution interval, if the time since
266 // last valid interval exceed the limitation (CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS).
267 //
268 // Valid task execution interval: continuous time with the cache enabled, which is longer than
269 // CONFIG_SPI_FLASH_ERASE_YIELD_TICKS. Yield time shorter than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS is
270 // not treated as valid interval.
on_spi_check_yield(app_func_arg_t * ctx)271 static inline IRAM_ATTR bool on_spi_check_yield(app_func_arg_t* ctx)
272 {
273 #ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
274     uint32_t time = k_uptime_get_32();
275     // We handle the reset here instead of in `on_spi_acquired()`, when acquire() and release() is
276     // larger than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, to save one `esp_system_get_time()` call
277     if ((time - ctx->released_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_TICKS * portTICK_PERIOD_MS * 1000) {
278         // Reset the acquired time as if the yield has just happened.
279         ctx->acquired_since_us = time;
280     } else if ((time - ctx->acquired_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS * 1000) {
281         return true;
282     }
283 #endif
284     return false;
285 }
on_spi_released(app_func_arg_t * ctx)286 static inline IRAM_ATTR void on_spi_released(app_func_arg_t* ctx)
287 {
288 #ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
289     ctx->released_since_us = k_uptime_get_32();
290 #endif
291 }
292 
on_spi_acquired(app_func_arg_t * ctx)293 static inline IRAM_ATTR void on_spi_acquired(app_func_arg_t* ctx)
294 {
295     // Ideally, when the time after `on_spi_released()` before this function is called is larger
296     // than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, the acquired time should be reset. We assume the
297     // time after `on_spi_check_yield()` before this function is so short that we can do the reset
298     // in that function instead.
299 }
300 
on_spi_yielded(app_func_arg_t * ctx)301 static inline IRAM_ATTR void on_spi_yielded(app_func_arg_t* ctx)
302 {
303     uint32_t time = k_uptime_get_32();
304     ctx->acquired_since_us = time;
305 }
306