/*
 * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <string.h>
#include <inttypes.h>
#include <sys/lock.h>
#include <sys/param.h>

#include "esp_attr.h"
#include "esp_check.h"
#include "esp_sleep.h"
#include "esp_log.h"
#include "esp_rom_crc.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "esp_private/sleep_cpu.h"
#include "esp_private/sleep_event.h"
#include "sdkconfig.h"

#if SOC_PMU_SUPPORTED
#include "esp_private/esp_pmu.h"
#else
#include "hal/rtc_hal.h"
#endif

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
#include "esp_private/system_internal.h"
#include "hal/clk_gate_ll.h"
#include "hal/uart_hal.h"
#endif

#include "soc/rtc_periph.h"

#ifdef CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C6
#include "riscv/csr.h"
#include "esp32c6/rom/rtc.h"
#include "riscv/rvsleep-frames.h"
#include "soc/intpri_reg.h"
#include "soc/extmem_reg.h"
#include "soc/plic_reg.h"
#include "soc/clint_reg.h"
#include "esp32c6/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32H2
#include "esp32h2/rom/rtc.h"
#include "riscv/rvsleep-frames.h"
#include "soc/intpri_reg.h"
#include "soc/extmem_reg.h"
#include "soc/plic_reg.h"
#include "soc/clint_reg.h"
#include "esp32h2/rom/cache.h"
#endif

static __attribute__((unused)) const char *TAG = "sleep";

typedef struct {
    uint32_t start;
    uint32_t end;
} cpu_domain_dev_regs_region_t;

typedef struct {
    cpu_domain_dev_regs_region_t *region;
    int region_num;
    uint32_t *regs_frame;
} cpu_domain_dev_sleep_frame_t;

/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    rtc_cntl_sleep_retent_t retent;
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    struct {
        RvCoreCriticalSleepFrame *critical_frame;
        RvCoreNonCriticalSleepFrame *non_critical_frame;
        cpu_domain_dev_sleep_frame_t *intpri_frame;
        cpu_domain_dev_sleep_frame_t *cache_config_frame;
        cpu_domain_dev_sleep_frame_t *plic_frame;
        cpu_domain_dev_sleep_frame_t *clint_frame;
    } retent;
#endif
} sleep_cpu_retention_t;

static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
{
    uint32_t sets;   /* i/d-cache total set counts */
    uint32_t index;  /* virtual address mapping i/d-cache row offset */
    uint32_t waysgrp;
    uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
    struct cache_mode imode = { .icache = 1 };
    struct cache_mode dmode = { .icache = 0 };

    /* calculate/prepare i-cache tag memory retention parameters */
    Cache_Get_Mode(&imode);
    sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
    index = (code_seg_vaddr / imode.cache_line_size) % sets;
    waysgrp = imode.cache_ways >> 2;

    code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);

    s_cpu_retention.retent.tagmem.icache.start_point = index;
    s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
    s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
    if (code_seg_size < imode.cache_size / imode.cache_ways) {
        s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
    icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
    icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
    ESP_LOGD(TAG, "I-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (imode.cache_size>>10),
            imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);

    /* calculate/prepare d-cache tag memory retention parameters */
    Cache_Get_Mode(&dmode);
    sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
    index = (data_seg_vaddr / dmode.cache_line_size) % sets;
    waysgrp = dmode.cache_ways >> 2;

    data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);

    s_cpu_retention.retent.tagmem.dcache.start_point = index;
    s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
    s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
#ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
    if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
        s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
#else
    s_cpu_retention.retent.tagmem.dcache.enable = 1;
#endif
    dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
    dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
    ESP_LOGD(TAG, "D-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (dmode.cache_size>>10),
            dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);

    /* For i/d-cache tagmem retention, backup and restore are performed through
     * RTC DMA, whose bus width is 128 bits. An i-cache tagmem block is 92 bits
     * and a d-cache tagmem block is 88 bits; RTC DMA pads each block to 96 bits,
     * so 3 RTC DMA transfers can move 4 i/d-cache tagmem blocks
     * (128 bits * 3 = 96 bits * 4). */
    return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
}
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
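
/*
 * Worked example of the sizing above (illustrative numbers only, not a statement
 * about any particular chip's cache geometry): assume a 16 KiB, 8-way cache with
 * 32-byte lines and a 1 KiB code segment. Then sets = 16384 / 8 / 32 = 64,
 * waysgrp = 8 >> 2 = 2, and vld_size = (1024 / 32) * 2 = 64 tag block groups.
 * With the same figures for the d-cache, the returned retention buffer size is
 * ((64 + 64) << 2) * 3 = 1536 bytes, i.e. 12 bytes (96 bits) of RTC DMA traffic
 * per tag block group.
 */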

static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
        if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
            extern char _stext[], _etext[];
            uint32_t code_start = (uint32_t)_stext;
            uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
            extern char _rodata_start[], _rodata_reserved_end[];
            uint32_t data_start = (uint32_t)_rodata_start;
            uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
            uint32_t data_start = SOC_DROM_LOW;
            uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
            ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                    code_start, code_size, data_start, data_size);
            uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
            void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                                tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                                MALLOC_CAP_RETENTION);
            if (buf) {
                s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                                      buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
            } else {
                s_cpu_retention.retent.tagmem.icache.enable = 0;
                s_cpu_retention.retent.tagmem.dcache.enable = 0;
                s_cpu_retention.retent.tagmem.link_addr = NULL;
                return ESP_ERR_NO_MEM;
            }
        }
#else // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
        s_cpu_retention.retent.tagmem.icache.enable = 0;
        s_cpu_retention.retent.tagmem.dcache.enable = 0;
        s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    return ESP_OK;
}

static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
{
#if SOC_PM_SUPPORT_TAGMEM_PD
        if (s_cpu_retention.retent.tagmem.link_addr) {
            heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
        }
#endif
    return ESP_OK;
}
#endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL

esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                            SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                            MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                                  (uint8_t *)buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}

esp_err_t esp_sleep_cpu_pd_low_deinit(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem) {
        heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
        s_cpu_retention.retent.cpu_pd_mem = NULL;
    }

#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}

void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);

#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}

void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);

#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
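
/*
 * Rough sketch of how these hooks are expected to be sequenced by the light sleep
 * flow (illustrative only; the real call sites live in the sleep and power
 * management code, not in this file):
 *
 *     esp_sleep_cpu_pd_low_init();        // once, when CPU power-down is enabled
 *     ...
 *     sleep_enable_cpu_retention();       // before entering light sleep
 *     // --- chip sleeps, CPU domain is powered down ---
 *     sleep_disable_cpu_retention();      // after wakeup
 *     ...
 *     esp_sleep_cpu_pd_low_deinit();      // when CPU power-down is disabled again
 */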

#endif


#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW

#define CUSTOM_CSR_PCER_MACHINE        0x7e0
#define CUSTOM_CSR_PCMR_MACHINE        0x7e1
#define CUSTOM_CSR_PCCR_MACHINE        0x7e2
#define CUSTOM_CSR_CPU_TESTBUS_CTRL    0x7e3
#define CUSTOM_CSR_PCER_USER           0x800
#define CUSTOM_CSR_PCMR_USER           0x801
#define CUSTOM_CSR_PCCR_USER           0x802
#define CUSTOM_CSR_GPIO_OEN_USER       0x803
#define CUSTOM_CSR_GPIO_IN_USER        0x804
#define CUSTOM_CSR_GPIO_OUT_USER       0x805
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE  0x7f0
#define CUSTOM_CSR_CO_HWLP             0x7f1
#define CUSTOM_CSR_CO_AIA              0x7f2

extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;

static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
{
    const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
    int regs_frame_sz = 0;
    for (int num = 0; num < region_num; num++) {
        regs_frame_sz += regions[num].end - regions[num].start;
    }
    /* Keep this a byte-granular pointer: the region table and register frame are laid
     * out at byte offsets behind the header, so the arithmetic below must not be
     * scaled by sizeof(uint32_t). */
    void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
    if (frame) {
        cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
        memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
        void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
        memset(regs_frame, 0, regs_frame_sz);
        *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
            .region = region,
            .region_num = region_num,
            .regs_frame = (uint32_t *)regs_frame
        };
    }
    return frame;
}

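/*
 * Layout of the buffer returned above (a sketch; offsets are in bytes and follow
 * directly from the allocation size computed in the function):
 *
 *     +-------------------------------+  frame
 *     | cpu_domain_dev_sleep_frame_t  |  header: region ptr, region_num, regs_frame ptr
 *     +-------------------------------+  frame + sizeof(header)
 *     | cpu_domain_dev_regs_region_t  |  region_num copies of the caller's region table
 *     |              ...              |
 *     +-------------------------------+  frame + sizeof(header) + region_sz
 *     | uint32_t regs_frame[]         |  one word per 4 bytes of register address range
 *     +-------------------------------+
 */
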
static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
        { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
#if CONFIG_IDF_TARGET_ESP32C6
        { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
        { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#elif CONFIG_IDF_TARGET_ESP32H2
        { .start = CACHE_L1_CACHE_CTRL_REG, .end = CACHE_L1_CACHE_CTRL_REG + 4 },
        { .start = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#endif
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
        { .start = PLIC_MXINT_CONF_REG,   .end = PLIC_MXINT_CONF_REG + 4  },
        { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
        { .start = PLIC_UXINT_CONF_REG,   .end = PLIC_UXINT_CONF_REG + 4  }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
        { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}

static esp_err_t esp_sleep_cpu_retention_init_impl(void)
{
    if (s_cpu_retention.retent.critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
        rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.non_critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.intpri_frame == NULL) {
        void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.cache_config_frame == NULL) {
        void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.plic_frame == NULL) {
        void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.clint_frame == NULL) {
        void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    return ESP_OK;
err:
    esp_sleep_cpu_retention_deinit();
    return ESP_ERR_NO_MEM;
}

static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
{
    if (s_cpu_retention.retent.critical_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
        s_cpu_retention.retent.critical_frame = NULL;
        rv_core_critical_regs_frame = NULL;
    }
    if (s_cpu_retention.retent.non_critical_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
        s_cpu_retention.retent.non_critical_frame = NULL;
    }
    if (s_cpu_retention.retent.intpri_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
        s_cpu_retention.retent.intpri_frame = NULL;
    }
    if (s_cpu_retention.retent.cache_config_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
        s_cpu_retention.retent.cache_config_frame = NULL;
    }
    if (s_cpu_retention.retent.plic_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
        s_cpu_retention.retent.plic_frame = NULL;
    }
    if (s_cpu_retention.retent.clint_frame) {
        heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
        s_cpu_retention.retent.clint_frame = NULL;
    }
    return ESP_OK;
}

static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
            "csrr   %0, mstatus\n"
            "csrci  mstatus, 0x8\n"
            : "=r"(mstatus)
        );
    return mstatus;
}
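
/*
 * Note: `csrci mstatus, 0x8` clears mstatus.MIE (machine interrupt enable, bit 3),
 * so the routine above saves the previous mstatus value and masks global interrupts
 * in one read-then-clear sequence; restore_mstatus() below writes the original value
 * back once the retention frames have been restored.
 */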

static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}

static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{
    assert(s_cpu_retention.retent.non_critical_frame);
    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
    frame->mscratch  = RV_READ_CSR(mscratch);
    frame->mideleg   = RV_READ_CSR(mideleg);
    frame->misa      = RV_READ_CSR(misa);
    frame->tselect   = RV_READ_CSR(tselect);
    frame->tdata1    = RV_READ_CSR(tdata1);
    frame->tdata2    = RV_READ_CSR(tdata2);
    frame->tcontrol  = RV_READ_CSR(tcontrol);

    frame->pmpaddr0  = RV_READ_CSR(pmpaddr0);
    frame->pmpaddr1  = RV_READ_CSR(pmpaddr1);
    frame->pmpaddr2  = RV_READ_CSR(pmpaddr2);
    frame->pmpaddr3  = RV_READ_CSR(pmpaddr3);
    frame->pmpaddr4  = RV_READ_CSR(pmpaddr4);
    frame->pmpaddr5  = RV_READ_CSR(pmpaddr5);
    frame->pmpaddr6  = RV_READ_CSR(pmpaddr6);
    frame->pmpaddr7  = RV_READ_CSR(pmpaddr7);
    frame->pmpaddr8  = RV_READ_CSR(pmpaddr8);
    frame->pmpaddr9  = RV_READ_CSR(pmpaddr9);
    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
    frame->pmpcfg0   = RV_READ_CSR(pmpcfg0);
    frame->pmpcfg1   = RV_READ_CSR(pmpcfg1);
    frame->pmpcfg2   = RV_READ_CSR(pmpcfg2);
    frame->pmpcfg3   = RV_READ_CSR(pmpcfg3);

#if SOC_CPU_HAS_PMA
    frame->pmaaddr0  = RV_READ_CSR(CSR_PMAADDR(0));
    frame->pmaaddr1  = RV_READ_CSR(CSR_PMAADDR(1));
    frame->pmaaddr2  = RV_READ_CSR(CSR_PMAADDR(2));
    frame->pmaaddr3  = RV_READ_CSR(CSR_PMAADDR(3));
    frame->pmaaddr4  = RV_READ_CSR(CSR_PMAADDR(4));
    frame->pmaaddr5  = RV_READ_CSR(CSR_PMAADDR(5));
    frame->pmaaddr6  = RV_READ_CSR(CSR_PMAADDR(6));
    frame->pmaaddr7  = RV_READ_CSR(CSR_PMAADDR(7));
    frame->pmaaddr8  = RV_READ_CSR(CSR_PMAADDR(8));
    frame->pmaaddr9  = RV_READ_CSR(CSR_PMAADDR(9));
    frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10));
    frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11));
    frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12));
    frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13));
    frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14));
    frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15));
    frame->pmacfg0   = RV_READ_CSR(CSR_PMACFG(0));
    frame->pmacfg1   = RV_READ_CSR(CSR_PMACFG(1));
    frame->pmacfg2   = RV_READ_CSR(CSR_PMACFG(2));
    frame->pmacfg3   = RV_READ_CSR(CSR_PMACFG(3));
    frame->pmacfg4   = RV_READ_CSR(CSR_PMACFG(4));
    frame->pmacfg5   = RV_READ_CSR(CSR_PMACFG(5));
    frame->pmacfg6   = RV_READ_CSR(CSR_PMACFG(6));
    frame->pmacfg7   = RV_READ_CSR(CSR_PMACFG(7));
    frame->pmacfg8   = RV_READ_CSR(CSR_PMACFG(8));
    frame->pmacfg9   = RV_READ_CSR(CSR_PMACFG(9));
    frame->pmacfg10  = RV_READ_CSR(CSR_PMACFG(10));
    frame->pmacfg11  = RV_READ_CSR(CSR_PMACFG(11));
    frame->pmacfg12  = RV_READ_CSR(CSR_PMACFG(12));
    frame->pmacfg13  = RV_READ_CSR(CSR_PMACFG(13));
    frame->pmacfg14  = RV_READ_CSR(CSR_PMACFG(14));
    frame->pmacfg15  = RV_READ_CSR(CSR_PMACFG(15));
#endif // SOC_CPU_HAS_PMA

    frame->utvec     = RV_READ_CSR(utvec);
    frame->ustatus   = RV_READ_CSR(ustatus);
    frame->uepc      = RV_READ_CSR(uepc);
    frame->ucause    = RV_READ_CSR(ucause);

    frame->mpcer     = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
    frame->mpcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
    frame->mpccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
    frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
    frame->upcer     = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
    frame->upcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
    frame->upccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
    frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
    frame->ugpio_in  = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
    frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
    return frame;
}

static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg,  frame->mideleg);
    RV_WRITE_CSR(misa,     frame->misa);
    RV_WRITE_CSR(tselect,  frame->tselect);
    RV_WRITE_CSR(tdata1,   frame->tdata1);
    RV_WRITE_CSR(tdata2,   frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    RV_WRITE_CSR(pmpcfg0,  frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1,  frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2,  frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3,  frame->pmpcfg3);

#if SOC_CPU_HAS_PMA
    RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0);
    RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1);
    RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2);
    RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3);
    RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4);
    RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5);
    RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6);
    RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7);
    RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8);
    RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9);
    RV_WRITE_CSR(CSR_PMAADDR(10),frame->pmaaddr10);
    RV_WRITE_CSR(CSR_PMAADDR(11),frame->pmaaddr11);
    RV_WRITE_CSR(CSR_PMAADDR(12),frame->pmaaddr12);
    RV_WRITE_CSR(CSR_PMAADDR(13),frame->pmaaddr13);
    RV_WRITE_CSR(CSR_PMAADDR(14),frame->pmaaddr14);
    RV_WRITE_CSR(CSR_PMAADDR(15),frame->pmaaddr15);
    RV_WRITE_CSR(CSR_PMACFG(0),  frame->pmacfg0);
    RV_WRITE_CSR(CSR_PMACFG(1),  frame->pmacfg1);
    RV_WRITE_CSR(CSR_PMACFG(2),  frame->pmacfg2);
    RV_WRITE_CSR(CSR_PMACFG(3),  frame->pmacfg3);
    RV_WRITE_CSR(CSR_PMACFG(4),  frame->pmacfg4);
    RV_WRITE_CSR(CSR_PMACFG(5),  frame->pmacfg5);
    RV_WRITE_CSR(CSR_PMACFG(6),  frame->pmacfg6);
    RV_WRITE_CSR(CSR_PMACFG(7),  frame->pmacfg7);
    RV_WRITE_CSR(CSR_PMACFG(8),  frame->pmacfg8);
    RV_WRITE_CSR(CSR_PMACFG(9),  frame->pmacfg9);
    RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10);
    RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11);
    RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12);
    RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13);
    RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14);
    RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15);
#endif // SOC_CPU_HAS_PMA

    RV_WRITE_CSR(utvec,    frame->utvec);
    RV_WRITE_CSR(ustatus,  frame->ustatus);
    RV_WRITE_CSR(uepc,     frame->uepc);
    RV_WRITE_CSR(ucause,   frame->ucause);

    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER,    frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER,    frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER,    frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}

static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
{
    assert(frame);
    cpu_domain_dev_regs_region_t *region = frame->region;
    uint32_t *regs_frame = frame->regs_frame;

    int offset = 0;
    for (int i = 0; i < frame->region_num; i++) {
        for (uint32_t addr = region[i].start; addr < region[i].end; addr += 4) {
            regs_frame[offset++] = *(uint32_t *)addr;
        }
    }
}

static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
{
    assert(frame);
    cpu_domain_dev_regs_region_t *region = frame->region;
    uint32_t *regs_frame = frame->regs_frame;

    int offset = 0;
    for (int i = 0; i < frame->region_num; i++) {
        for (uint32_t addr = region[i].start; addr < region[i].end; addr += 4) {
            *(uint32_t *)addr = regs_frame[offset++];
        }
    }
}

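/*
 * Illustrative example of the mapping (hypothetical region; any two adjacent 32-bit
 * registers would do): a region with .end = .start + 8 contributes exactly two words
 * to regs_frame[], one for the register at .start and one for .start + 4. Because
 * cpu_domain_dev_regs_restore() walks the same region list in the same order, the
 * offsets into regs_frame[] line up between save and restore without any
 * per-register bookkeeping.
 */
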
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
static void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    *(frame_crc_ptr) = esp_rom_crc32_le(0, (void *)frame_ptr, frame_check_size);
}

static void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    if (*(frame_crc_ptr) != esp_rom_crc32_le(0, (void *)(frame_ptr), frame_check_size)) {
        // resume uarts
        for (int i = 0; i < SOC_UART_NUM; ++i) {
#ifndef CONFIG_IDF_TARGET_ESP32
            if (!periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) {
                continue;
            }
#endif
            uart_ll_force_xon(i);
        }

        /* Still inside the critical section here, so only ESP_EARLY_LOG can be used */
        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
        esp_restart_noos();
    }
}
#endif

extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);

static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
        esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_END, (void *)0);
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
        /* Subtract 2 * sizeof(long) to skip the `pmufunc` and `frame_crc` fields */
        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    else {
        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
    }
#endif

    return pmu_sleep_finish();
}
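
/*
 * In effect, rv_core_critical_regs_save() "returns" twice here (roughly analogous to
 * setjmp), and the pmufunc check above distinguishes the two cases:
 *  - On the way into sleep, the CRC over the critical frame is recorded, the wake stub
 *    register is pointed at rv_core_critical_regs_restore, and goto_sleep() powers the
 *    CPU domain down.
 *  - After wakeup, execution comes back through that stub with the saved context
 *    restored, the frame CRC is validated, and pmu_sleep_finish() completes the
 *    wakeup path.
 */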

esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
    uint32_t mstatus = save_mstatus_and_disable_global_int();

    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* Subtract sizeof(long) to skip the `frame_crc` field */
    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif

    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);

#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif

    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);
    restore_mstatus(mstatus);
    return err;
}
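
/*
 * Note on ordering: the CPU-domain device frames are saved before the core's
 * non-critical registers and restored in the reverse order afterwards, with the
 * interrupt-enable state (mstatus) saved first and restored last, so interrupts
 * stay masked for the whole save/sleep/restore window.
 */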

#endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW


#if SOC_PM_SUPPORT_CPU_PD

esp_err_t esp_sleep_cpu_retention_init(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_init();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_init_impl();
#endif
    return err;
}

esp_err_t esp_sleep_cpu_retention_deinit(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_deinit();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_deinit_impl();
#endif
    return err;
}

bool cpu_domain_pd_allowed(void)
{
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    return (s_cpu_retention.retent.cpu_pd_mem != NULL);
#elif SOC_PM_CPU_RETENTION_BY_SW
    return (s_cpu_retention.retent.critical_frame != NULL) && \
         (s_cpu_retention.retent.non_critical_frame != NULL) && \
         (s_cpu_retention.retent.intpri_frame != NULL) && \
         (s_cpu_retention.retent.cache_config_frame != NULL) && \
         (s_cpu_retention.retent.plic_frame != NULL) && \
         (s_cpu_retention.retent.clint_frame != NULL);
#else
    return false;
#endif
}

esp_err_t sleep_cpu_configure(bool light_sleep_enable)
{
#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
    if (light_sleep_enable) {
        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
    } else {
        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
    }
#endif
    return ESP_OK;
}
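
/*
 * Minimal usage sketch (illustrative only; this function is typically driven by the
 * power management / sleep configuration code, and it is a no-op unless
 * CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP is enabled):
 *
 *     ESP_ERROR_CHECK(sleep_cpu_configure(true));   // allocate CPU retention frames
 *     // ... light sleep cycles may now power down the CPU domain ...
 *     ESP_ERROR_CHECK(sleep_cpu_configure(false));  // free CPU retention frames
 */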

#endif