/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <string.h>
#include <inttypes.h>
#include <sys/lock.h>
#include <sys/param.h>

#include "esp_attr.h"
#include "esp_check.h"
#include "esp_sleep.h"
#include "esp_log.h"
#include "esp_crc.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "esp_private/sleep_cpu.h"
#include "sdkconfig.h"

#if SOC_PMU_SUPPORTED
#include "esp_private/esp_pmu.h"
#else
#include "hal/rtc_hal.h"
#endif

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
#include "esp_private/system_internal.h"
#include "hal/clk_gate_ll.h"
#include "hal/uart_hal.h"
#endif

#include "soc/rtc_periph.h"

#ifdef CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C6
#include "esp32c6/rom/rtc.h"
#include "riscv/rvsleep-frames.h"
#include "soc/intpri_reg.h"
#include "soc/extmem_reg.h"
#include "soc/plic_reg.h"
#include "soc/clint_reg.h"
#include "esp32c6/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32H2
#include "esp32h2/rom/rtc.h"
#include "riscv/rvsleep-frames.h"
#include "soc/intpri_reg.h"
#include "soc/extmem_reg.h"
#include "soc/plic_reg.h"
#include "soc/clint_reg.h"
#include "esp32h2/rom/cache.h"
#endif

static __attribute__((unused)) const char *TAG = "sleep";

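/* A contiguous range of CPU domain device registers, expressed as a half-open
 * address window [start, end), which is backed up and restored word by word. */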
typedef struct {
    uint32_t start;
    uint32_t end;
} cpu_domain_dev_regs_region_t;

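/* Sleep frame of a CPU domain device: the register regions to preserve plus the
 * backing storage (regs_frame) that holds their contents across sleep. */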
typedef struct {
    cpu_domain_dev_regs_region_t *region;
    int region_num;
    uint32_t *regs_frame;
} cpu_domain_dev_sleep_frame_t;

/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    rtc_cntl_sleep_retent_t retent;
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    struct {
        RvCoreCriticalSleepFrame *critical_frame;
        RvCoreNonCriticalSleepFrame *non_critical_frame;
        cpu_domain_dev_sleep_frame_t *intpri_frame;
        cpu_domain_dev_sleep_frame_t *cache_config_frame;
        cpu_domain_dev_sleep_frame_t *plic_frame;
        cpu_domain_dev_sleep_frame_t *clint_frame;
    } retent;
#endif
} sleep_cpu_retention_t;

static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
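/* Computes the i/d-cache tag memory retention parameters for the given code and
 * data segments, stores them in s_cpu_retention, and returns the size (in bytes)
 * of the RTC DMA buffer needed to back up the tag memory contents. */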
static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
{
    uint32_t sets;   /* i/d-cache total set counts */
    uint32_t index;  /* virtual address mapping i/d-cache row offset */
    uint32_t waysgrp;
    uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
    struct cache_mode imode = { .icache = 1 };
    struct cache_mode dmode = { .icache = 0 };

    /* calculate/prepare i-cache tag memory retention parameters */
    Cache_Get_Mode(&imode);
    sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
    index = (code_seg_vaddr / imode.cache_line_size) % sets;
    waysgrp = imode.cache_ways >> 2;

    code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);

    s_cpu_retention.retent.tagmem.icache.start_point = index;
    s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
    s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
    if (code_seg_size < imode.cache_size / imode.cache_ways) {
        s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
    icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
    icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
    ESP_LOGD(TAG, "I-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (imode.cache_size>>10),
            imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);

    /* calculate/prepare d-cache tag memory retention parameters */
    Cache_Get_Mode(&dmode);
    sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
    index = (data_seg_vaddr / dmode.cache_line_size) % sets;
    waysgrp = dmode.cache_ways >> 2;

    data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);

    s_cpu_retention.retent.tagmem.dcache.start_point = index;
    s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
    s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
#ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
    if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
        s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
#else
    s_cpu_retention.retent.tagmem.dcache.enable = 1;
#endif
    dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
    dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
    ESP_LOGD(TAG, "D-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (dmode.cache_size>>10),
            dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);
    /* I/D-cache tag memory backup and restore are performed through RTC DMA, whose
     * bus width is 128 bits. An i-cache tagmem block is 92 bits and a d-cache tagmem
     * block is 88 bits; RTC DMA automatically pads each block to 96 bits, so three
     * 128-bit RTC DMA transfers carry four tagmem blocks (128 bits * 3 = 96 bits * 4). */
    return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
}
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP

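/* Allocates the retention buffer and RTC DMA link list used to back up the cache
 * tag memory while it is powered down during light sleep; on allocation failure,
 * tag memory retention is disabled and ESP_ERR_NO_MEM is returned. */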
static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
        if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
            extern char _stext[], _etext[];
            uint32_t code_start = (uint32_t)_stext;
            uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
            extern char _rodata_start[], _rodata_reserved_end[];
            uint32_t data_start = (uint32_t)_rodata_start;
            uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
            uint32_t data_start = SOC_DROM_LOW;
            uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
            ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                    code_start, code_size, data_start, data_size);
            uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
            void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                                tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                                MALLOC_CAP_RETENTION);
            if (buf) {
                s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                                      buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
            } else {
                s_cpu_retention.retent.tagmem.icache.enable = 0;
                s_cpu_retention.retent.tagmem.dcache.enable = 0;
                s_cpu_retention.retent.tagmem.link_addr = NULL;
                return ESP_ERR_NO_MEM;
            }
        }
#else // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
        s_cpu_retention.retent.tagmem.icache.enable = 0;
        s_cpu_retention.retent.tagmem.dcache.enable = 0;
        s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    return ESP_OK;
}

static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
{
#if SOC_PM_SUPPORT_TAGMEM_PD
        if (s_cpu_retention.retent.tagmem.link_addr) {
            heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
        }
#endif
    return ESP_OK;
}
#endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

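/* Allocates the retention memory and RTC DMA link list used to back up and restore
 * the CPU domain while it is powered down during light sleep, and sets up cache tag
 * memory retention where supported. The pragmas below permit the pointer arithmetic
 * performed on the void * buffer returned by the allocator. */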
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpointer-arith"
esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                            SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                            MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                                  buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
#pragma GCC diagnostic pop

esp_err_t esp_sleep_cpu_pd_low_deinit(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem) {
        heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
        s_cpu_retention.retent.cpu_pd_mem = NULL;
    }

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}

void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);

#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}

void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);

#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}

#endif


#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW

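/* Custom (non-standard) CSR addresses of the RISC-V core: performance counter
 * controls, a CPU test-bus control, and the CSR-mapped GPIO registers. These are
 * saved and restored as part of the non-critical register frame below. */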
#define CUSTOM_CSR_PCER_MACHINE        0x7e0
#define CUSTOM_CSR_PCMR_MACHINE        0x7e1
#define CUSTOM_CSR_PCCR_MACHINE        0x7e2
#define CUSTOM_CSR_CPU_TESTBUS_CTRL    0x7e3
#define CUSTOM_CSR_PCER_USER           0x800
#define CUSTOM_CSR_PCMR_USER           0x801
#define CUSTOM_CSR_PCCR_USER           0x802
#define CUSTOM_CSR_GPIO_OEN_USER       0x803
#define CUSTOM_CSR_GPIO_IN_USER        0x804
#define CUSTOM_CSR_GPIO_OUT_USER       0x805
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE  0x7f0
#define CUSTOM_CSR_CO_HWLP             0x7f1
#define CUSTOM_CSR_CO_AIA              0x7f2

extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;

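/* Allocates one internal-memory block holding a cpu_domain_dev_sleep_frame_t
 * header, a copy of the given register region table, and zero-initialized storage
 * large enough to back up every register in those regions; returns NULL on
 * allocation failure. */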
static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
{
    const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
    int regs_frame_sz = 0;
    for (int num = 0; num < region_num; num++) {
        regs_frame_sz += regions[num].end - regions[num].start;
    }
    void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
    if (frame) {
        cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
        memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
        void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
        memset(regs_frame, 0, regs_frame_sz);
        *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
            .region = region,
            .region_num = region_num,
            .regs_frame = (uint32_t *)regs_frame
        };
    }
    return frame;
}

static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
        { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
#if CONFIG_IDF_TARGET_ESP32C6
        { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
        { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#elif CONFIG_IDF_TARGET_ESP32H2
        { .start = CACHE_L1_CACHE_CTRL_REG, .end = CACHE_L1_CACHE_CTRL_REG + 4 },
        { .start = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#endif
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
        { .start = PLIC_MXINT_CONF_REG,   .end = PLIC_MXINT_CONF_REG + 4  },
        { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
        { .start = PLIC_UXINT_CONF_REG,   .end = PLIC_UXINT_CONF_REG + 4  }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
        { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

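/* Allocates every frame required for software CPU retention: the critical and
 * non-critical core register frames plus the INTPRI, cache-config, PLIC and CLINT
 * device frames. On any allocation failure, the partially allocated frames are
 * released and ESP_ERR_NO_MEM is returned. */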
static esp_err_t esp_sleep_cpu_retention_init_impl(void)
{
    if (s_cpu_retention.retent.critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
        rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.non_critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.intpri_frame == NULL) {
        void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.cache_config_frame == NULL) {
        void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.plic_frame == NULL) {
        void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.clint_frame == NULL) {
        void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    return ESP_OK;
err:
    esp_sleep_cpu_retention_deinit();
    return ESP_ERR_NO_MEM;
}

static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
{
    if (s_cpu_retention.retent.critical_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
        s_cpu_retention.retent.critical_frame = NULL;
        rv_core_critical_regs_frame = NULL;
    }
    if (s_cpu_retention.retent.non_critical_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
        s_cpu_retention.retent.non_critical_frame = NULL;
    }
    if (s_cpu_retention.retent.intpri_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
        s_cpu_retention.retent.intpri_frame = NULL;
    }
    if (s_cpu_retention.retent.cache_config_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
        s_cpu_retention.retent.cache_config_frame = NULL;
    }
    if (s_cpu_retention.retent.plic_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
        s_cpu_retention.retent.plic_frame = NULL;
    }
    if (s_cpu_retention.retent.clint_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
        s_cpu_retention.retent.clint_frame = NULL;
    }
    return ESP_OK;
}

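/* Reads mstatus and clears the MIE bit (bit 3) via `csrci` to globally disable
 * machine-level interrupts; returns the previous mstatus value so the caller can
 * restore it after wake-up. */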
static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
            "csrr   %0, mstatus\n"
            "csrci  mstatus, 0x8\n"
            : "=r"(mstatus)
        );
    return mstatus;
}

static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}

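/* Saves the CSRs kept in the non-critical sleep frame (trap/debug setup, PMP/PMA
 * configuration, user-mode and custom CSRs) and returns a pointer to the frame. */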
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{
    assert(s_cpu_retention.retent.non_critical_frame);
    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
    frame->mscratch  = RV_READ_CSR(mscratch);
    frame->mideleg   = RV_READ_CSR(mideleg);
    frame->misa      = RV_READ_CSR(misa);
    frame->tselect   = RV_READ_CSR(tselect);
    frame->tdata1    = RV_READ_CSR(tdata1);
    frame->tdata2    = RV_READ_CSR(tdata2);
    frame->tcontrol  = RV_READ_CSR(tcontrol);

    frame->pmpaddr0  = RV_READ_CSR(pmpaddr0);
    frame->pmpaddr1  = RV_READ_CSR(pmpaddr1);
    frame->pmpaddr2  = RV_READ_CSR(pmpaddr2);
    frame->pmpaddr3  = RV_READ_CSR(pmpaddr3);
    frame->pmpaddr4  = RV_READ_CSR(pmpaddr4);
    frame->pmpaddr5  = RV_READ_CSR(pmpaddr5);
    frame->pmpaddr6  = RV_READ_CSR(pmpaddr6);
    frame->pmpaddr7  = RV_READ_CSR(pmpaddr7);
    frame->pmpaddr8  = RV_READ_CSR(pmpaddr8);
    frame->pmpaddr9  = RV_READ_CSR(pmpaddr9);
    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
    frame->pmpcfg0   = RV_READ_CSR(pmpcfg0);
    frame->pmpcfg1   = RV_READ_CSR(pmpcfg1);
    frame->pmpcfg2   = RV_READ_CSR(pmpcfg2);
    frame->pmpcfg3   = RV_READ_CSR(pmpcfg3);

#if SOC_CPU_HAS_PMA
    frame->pmaaddr0  = RV_READ_CSR(CSR_PMAADDR(0));
    frame->pmaaddr1  = RV_READ_CSR(CSR_PMAADDR(1));
    frame->pmaaddr2  = RV_READ_CSR(CSR_PMAADDR(2));
    frame->pmaaddr3  = RV_READ_CSR(CSR_PMAADDR(3));
    frame->pmaaddr4  = RV_READ_CSR(CSR_PMAADDR(4));
    frame->pmaaddr5  = RV_READ_CSR(CSR_PMAADDR(5));
    frame->pmaaddr6  = RV_READ_CSR(CSR_PMAADDR(6));
    frame->pmaaddr7  = RV_READ_CSR(CSR_PMAADDR(7));
    frame->pmaaddr8  = RV_READ_CSR(CSR_PMAADDR(8));
    frame->pmaaddr9  = RV_READ_CSR(CSR_PMAADDR(9));
    frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10));
    frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11));
    frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12));
    frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13));
    frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14));
    frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15));
    frame->pmacfg0   = RV_READ_CSR(CSR_PMACFG(0));
    frame->pmacfg1   = RV_READ_CSR(CSR_PMACFG(1));
    frame->pmacfg2   = RV_READ_CSR(CSR_PMACFG(2));
    frame->pmacfg3   = RV_READ_CSR(CSR_PMACFG(3));
    frame->pmacfg4   = RV_READ_CSR(CSR_PMACFG(4));
    frame->pmacfg5   = RV_READ_CSR(CSR_PMACFG(5));
    frame->pmacfg6   = RV_READ_CSR(CSR_PMACFG(6));
    frame->pmacfg7   = RV_READ_CSR(CSR_PMACFG(7));
    frame->pmacfg8   = RV_READ_CSR(CSR_PMACFG(8));
    frame->pmacfg9   = RV_READ_CSR(CSR_PMACFG(9));
    frame->pmacfg10  = RV_READ_CSR(CSR_PMACFG(10));
    frame->pmacfg11  = RV_READ_CSR(CSR_PMACFG(11));
    frame->pmacfg12  = RV_READ_CSR(CSR_PMACFG(12));
    frame->pmacfg13  = RV_READ_CSR(CSR_PMACFG(13));
    frame->pmacfg14  = RV_READ_CSR(CSR_PMACFG(14));
    frame->pmacfg15  = RV_READ_CSR(CSR_PMACFG(15));
#endif // SOC_CPU_HAS_PMA

    frame->utvec     = RV_READ_CSR(utvec);
    frame->ustatus   = RV_READ_CSR(ustatus);
    frame->uepc      = RV_READ_CSR(uepc);
    frame->ucause    = RV_READ_CSR(ucause);

    frame->mpcer     = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
    frame->mpcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
    frame->mpccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
    frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
    frame->upcer     = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
    frame->upcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
    frame->upccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
    frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
    frame->ugpio_in  = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
    frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
    return frame;
}

static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg,  frame->mideleg);
    RV_WRITE_CSR(misa,     frame->misa);
    RV_WRITE_CSR(tselect,  frame->tselect);
    RV_WRITE_CSR(tdata1,   frame->tdata1);
    RV_WRITE_CSR(tdata2,   frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    RV_WRITE_CSR(pmpcfg0,  frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1,  frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2,  frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3,  frame->pmpcfg3);

#if SOC_CPU_HAS_PMA
    RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0);
    RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1);
    RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2);
    RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3);
    RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4);
    RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5);
    RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6);
    RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7);
    RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8);
    RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9);
    RV_WRITE_CSR(CSR_PMAADDR(10),frame->pmaaddr10);
    RV_WRITE_CSR(CSR_PMAADDR(11),frame->pmaaddr11);
    RV_WRITE_CSR(CSR_PMAADDR(12),frame->pmaaddr12);
    RV_WRITE_CSR(CSR_PMAADDR(13),frame->pmaaddr13);
    RV_WRITE_CSR(CSR_PMAADDR(14),frame->pmaaddr14);
    RV_WRITE_CSR(CSR_PMAADDR(15),frame->pmaaddr15);
    RV_WRITE_CSR(CSR_PMACFG(0),  frame->pmacfg0);
    RV_WRITE_CSR(CSR_PMACFG(1),  frame->pmacfg1);
    RV_WRITE_CSR(CSR_PMACFG(2),  frame->pmacfg2);
    RV_WRITE_CSR(CSR_PMACFG(3),  frame->pmacfg3);
    RV_WRITE_CSR(CSR_PMACFG(4),  frame->pmacfg4);
    RV_WRITE_CSR(CSR_PMACFG(5),  frame->pmacfg5);
    RV_WRITE_CSR(CSR_PMACFG(6),  frame->pmacfg6);
    RV_WRITE_CSR(CSR_PMACFG(7),  frame->pmacfg7);
    RV_WRITE_CSR(CSR_PMACFG(8),  frame->pmacfg8);
    RV_WRITE_CSR(CSR_PMACFG(9),  frame->pmacfg9);
    RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10);
    RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11);
    RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12);
    RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13);
    RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14);
    RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15);
#endif //SOC_CPU_HAS_PMA

    RV_WRITE_CSR(utvec,    frame->utvec);
    RV_WRITE_CSR(ustatus,  frame->ustatus);
    RV_WRITE_CSR(uepc,     frame->uepc);
    RV_WRITE_CSR(ucause,   frame->ucause);

    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER,    frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER,    frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER,    frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}

static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
{
    assert(frame);
    cpu_domain_dev_regs_region_t *region = frame->region;
    uint32_t *regs_frame = frame->regs_frame;

    int offset = 0;
    for (int i = 0; i < frame->region_num; i++) {
        for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
            regs_frame[offset++] = *(uint32_t *)addr;
        }
    }
}

static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
{
    assert(frame);
    cpu_domain_dev_regs_region_t *region = frame->region;
    uint32_t *regs_frame = frame->regs_frame;

    int offset = 0;
    for (int i = 0; i < frame->region_num; i++) {
        for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
            *(uint32_t *)addr = regs_frame[offset++];
        }
    }
}

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
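/* Helpers that compute and verify a CRC32 over a retention frame so that a
 * corrupted frame can be detected before and after the CPU context is restored. */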
static void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size);
}

static void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    if (*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)) {
        // resume uarts
        for (int i = 0; i < SOC_UART_NUM; ++i) {
#ifndef CONFIG_IDF_TARGET_ESP32
            if (!periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) {
                continue;
            }
#endif
            uart_ll_force_xon(i);
        }

        /* We are still in a critical section here, so use ESP_EARLY_LOG */
        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
        esp_restart_noos();
    }
}
#endif

extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);

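/* Saves the critical core registers and enters sleep through the given callback.
 * Execution reaches the check below twice: once right after the registers are
 * saved (the low bits of pmufunc read 0x1), in which case the wake stub address
 * is programmed and the sleep callback is invoked, and once again after wake-up,
 * when the wake stub has restored the critical frame; on that path the frame CRC
 * is re-validated (if enabled) and the PMU sleep sequence is finished. */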
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
        /* Subtract 2 * sizeof(long) to skip the `pmufunc` and `frame_crc` fields */
        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    else {
        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
    }
#endif

    return pmu_sleep_finish();
}

esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    uint32_t mstatus = save_mstatus_and_disable_global_int();

    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* Subtract sizeof(long) to skip the `frame_crc` field */
    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif

    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif

    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);

    restore_mstatus(mstatus);
    return err;
}

#endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW


#if SOC_PM_SUPPORT_CPU_PD

esp_err_t esp_sleep_cpu_retention_init(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_init();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_init_impl();
#endif
    return err;
}

esp_err_t esp_sleep_cpu_retention_deinit(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_deinit();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_deinit_impl();
#endif
    return err;
}

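/* Reports whether the CPU power domain may be powered down in light sleep, i.e.
 * whether all retention memory/frames needed to restore the CPU state have been
 * allocated. */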
bool cpu_domain_pd_allowed(void)
{
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    return (s_cpu_retention.retent.cpu_pd_mem != NULL);
#elif SOC_PM_CPU_RETENTION_BY_SW
    return (s_cpu_retention.retent.critical_frame != NULL) && \
         (s_cpu_retention.retent.non_critical_frame != NULL) && \
         (s_cpu_retention.retent.intpri_frame != NULL) && \
         (s_cpu_retention.retent.cache_config_frame != NULL) && \
         (s_cpu_retention.retent.plic_frame != NULL) && \
         (s_cpu_retention.retent.clint_frame != NULL);
#else
    return false;
#endif
}

esp_err_t sleep_cpu_configure(bool light_sleep_enable)
{
#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
    if (light_sleep_enable) {
        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
    } else {
        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
    }
#endif
    return ESP_OK;
}

#endif