/*
 * SPDX-FileCopyrightText: 2016-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/param.h>

#include "esp_attr.h"
#include "esp_err.h"
#include "esp_pm.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_clk_tree.h"

#include "esp_private/crosscore_int.h"

#include "soc/rtc.h"
#include "hal/uart_ll.h"
#include "hal/uart_types.h"
#include "driver/uart.h"
#include "driver/gpio.h"

#if CONFIG_XTENSA_TIMER
#include "xtensa/core-macros.h"
#endif

#if SOC_SPI_MEM_SUPPORT_TIME_TUNING
#include "esp_private/mspi_timing_tuning.h"
#endif

#include "esp_private/pm_impl.h"
#include "esp_private/pm_trace.h"
#include "esp_private/esp_timer_private.h"
#include "esp_private/esp_clk.h"
#include "esp_private/sleep_cpu.h"
#include "esp_private/sleep_gpio.h"
#include "esp_private/sleep_modem.h"
#include "esp_sleep.h"
#include <zephyr/kernel.h>

#include "sdkconfig.h"

#ifdef CONFIG_XTENSA_TIMER
/* CCOMPARE update timeout, in CPU cycles. Any value above ~600 cycles will work
 * for the purpose of detecting a deadlock.
 */
#define CCOMPARE_UPDATE_TIMEOUT 1000000

/* When changing CCOMPARE, don't allow changes if the difference is less
 * than this. This is to prevent setting CCOMPARE below CCOUNT.
 */
#define CCOMPARE_MIN_CYCLES_IN_FUTURE 1000
#endif // CONFIG_XTENSA_TIMER

/* When light sleep is used, wake this number of microseconds earlier than
 * the next tick.
 */
#define LIGHT_SLEEP_EARLY_WAKEUP_US 100

#if CONFIG_IDF_TARGET_ESP32
/* Minimal divider at which REF_CLK_FREQ can be obtained */
#define REF_CLK_DIV_MIN 10
#elif CONFIG_IDF_TARGET_ESP32S2
/* Minimal divider at which REF_CLK_FREQ can be obtained */
#define REF_CLK_DIV_MIN 2
#elif CONFIG_IDF_TARGET_ESP32S3
/* Minimal divider at which REF_CLK_FREQ can be obtained */
#define REF_CLK_DIV_MIN 2         // TODO: IDF-5660
#elif CONFIG_IDF_TARGET_ESP32C3
#define REF_CLK_DIV_MIN 2
#elif CONFIG_IDF_TARGET_ESP32C2
#define REF_CLK_DIV_MIN 2
#elif CONFIG_IDF_TARGET_ESP32C6
#define REF_CLK_DIV_MIN 2
#elif CONFIG_IDF_TARGET_ESP32H2
#define REF_CLK_DIV_MIN 2
#endif

#ifdef CONFIG_PM_PROFILING
#define WITH_PROFILING
#endif

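/* Critical sections in this port are implemented with Zephyr's irq_lock()/irq_unlock();
 * the interrupt lock-out key is stored through lock_ptr.
 */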
#define ENTER_CRITICAL(lock_ptr)    do { *lock_ptr = irq_lock(); } while(0)
#define EXIT_CRITICAL(lock_ptr)     irq_unlock(*lock_ptr)

static int s_switch_lock;
/* The following state variables are protected using s_switch_lock: */
/* Current sleep mode; When switching, contains old mode until switch is complete */
static pm_mode_t s_mode = PM_MODE_CPU_MAX;
/* True when switch is in progress */
static volatile bool s_is_switching;
/* Number of times each mode was locked */
static size_t s_mode_lock_counts[PM_MODE_COUNT];
/* Bit mask of locked modes. BIT(i) is set iff s_mode_lock_counts[i] > 0. */
static uint32_t s_mode_mask;

/* A flag indicating that Idle hook has run on a given CPU;
 * Next interrupt on the same CPU will take s_rtos_lock_handle.
 */
static bool s_core_idle[CONFIG_MP_MAX_NUM_CPUS];

/* When no RTOS tasks are active, these locks are released to allow going into
 * a lower power mode. Used by ISR hook and idle hook.
 */
static esp_pm_lock_handle_t s_rtos_lock_handle[CONFIG_MP_MAX_NUM_CPUS];

/* Lookup table of CPU frequency configs to be used in each mode.
 * Initialized by esp_pm_impl_init and modified by esp_pm_configure.
 */
static rtc_cpu_freq_config_t s_cpu_freq_by_mode[PM_MODE_COUNT];

/* Whether automatic light sleep is enabled */
static bool s_light_sleep_en = false;

/* When configuration is changed, current frequency may not match the
 * newly configured frequency for the current mode. This is an indicator
 * to the mode switch code to get the actual current frequency instead of
 * relying on the current mode.
 */
static bool s_config_changed = false;

#ifdef WITH_PROFILING
/* Time, in microseconds, spent so far in each mode */
static pm_time_t s_time_in_mode[PM_MODE_COUNT];
/* Timestamp, in microseconds, when the mode switch last happened */
static pm_time_t s_last_mode_change_time;
/* User-readable mode names, used by esp_pm_impl_dump_stats */
static const char* s_mode_names[] = {
        "SLEEP",
        "APB_MIN",
        "APB_MAX",
        "CPU_MAX"
};
static uint32_t s_light_sleep_counts, s_light_sleep_reject_counts;
#endif // WITH_PROFILING

#ifdef CONFIG_XTENSA_TIMER
/* Indicates to the ISR hook that CCOMPARE needs to be updated on the given CPU.
 * Used in conjunction with cross-core interrupt to update CCOMPARE on the other CPU.
 */
static volatile bool s_need_update_ccompare[CONFIG_MP_MAX_NUM_CPUS];

/* Divider and multiplier used to adjust (ccompare - ccount) duration.
 * Only set to non-zero values when switch is in progress.
 */
static uint32_t s_ccount_div;
static uint32_t s_ccount_mul;

static void update_ccompare(void);
#endif // CONFIG_XTENSA_TIMER

static const char* TAG = "pm";

static void do_switch(pm_mode_t new_mode);
static void leave_idle(void);
static void on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us);

pm_mode_t esp_pm_impl_get_mode(esp_pm_lock_type_t type, int arg)
{
    (void) arg;
    if (type == ESP_PM_CPU_FREQ_MAX) {
        return PM_MODE_CPU_MAX;
    } else if (type == ESP_PM_APB_FREQ_MAX) {
        return PM_MODE_APB_MAX;
    } else if (type == ESP_PM_NO_LIGHT_SLEEP) {
        return PM_MODE_APB_MIN;
    } else {
        // unsupported mode
        abort();
    }
}

static esp_err_t esp_pm_sleep_configure(const void *vconfig)
{
    esp_err_t err = ESP_OK;
    const esp_pm_config_t* config = (const esp_pm_config_t*) vconfig;

#if SOC_PM_SUPPORT_CPU_PD
    err = sleep_cpu_configure(config->light_sleep_enable);
    if (err != ESP_OK) {
        return err;
    }
#endif

    err = sleep_modem_configure(config->max_freq_mhz, config->min_freq_mhz, config->light_sleep_enable);
    return err;
}

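/* Typical call from application code (illustrative values):
 *
 *     esp_pm_config_t pm_cfg = {
 *         .max_freq_mhz = 160,
 *         .min_freq_mhz = 40,
 *         .light_sleep_enable = true,
 *     };
 *     ESP_ERROR_CHECK(esp_pm_configure(&pm_cfg));
 */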
esp_err_t esp_pm_configure(const void* vconfig)
{
#ifndef CONFIG_PM_ENABLE
    return ESP_ERR_NOT_SUPPORTED;
#endif

    const esp_pm_config_t* config = (const esp_pm_config_t*) vconfig;

    int min_freq_mhz = config->min_freq_mhz;
    int max_freq_mhz = config->max_freq_mhz;

    if (min_freq_mhz > max_freq_mhz) {
        return ESP_ERR_INVALID_ARG;
    }

    rtc_cpu_freq_config_t freq_config;
    if (!rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &freq_config)) {
        ESP_LOGW(TAG, "invalid min_freq_mhz value (%d)", min_freq_mhz);
        return ESP_ERR_INVALID_ARG;
    }

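    /* A min_freq_mhz below the XTAL frequency must still allow REF_CLK_FREQ (1 MHz)
     * to be derived with a divider of at least REF_CLK_DIV_MIN; e.g. on ESP32
     * (REF_CLK_DIV_MIN = 10) this rejects values below 10 MHz.
     */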
    int xtal_freq_mhz = esp_clk_xtal_freq() / MHZ(1);
    if (min_freq_mhz < xtal_freq_mhz && min_freq_mhz * MHZ(1) / REF_CLK_FREQ < REF_CLK_DIV_MIN) {
        ESP_LOGW(TAG, "min_freq_mhz should be >= %d", REF_CLK_FREQ * REF_CLK_DIV_MIN / MHZ(1));
        return ESP_ERR_INVALID_ARG;
    }

    if (!rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &freq_config)) {
        ESP_LOGW(TAG, "invalid max_freq_mhz value (%d)", max_freq_mhz);
        return ESP_ERR_INVALID_ARG;
    }

#if CONFIG_IDF_TARGET_ESP32
    int apb_max_freq = max_freq_mhz; /* CPU frequency in APB_MAX mode */
    if (max_freq_mhz == 240) {
        /* We can't switch between 240 and 80/160 without disabling PLL,
         * so use 240MHz CPU frequency when 80MHz APB frequency is requested.
         */
        apb_max_freq = 240;
    } else if (max_freq_mhz == 160 || max_freq_mhz == 80) {
        /* Otherwise, can use 80MHz
         * CPU frequency when 80MHz APB frequency is requested.
         */
        apb_max_freq = 80;
    }
#else
    /* Maximum SOC APB clock frequency is 40 MHz, maximum Modem (WiFi,
     * Bluetooth, etc.) APB clock frequency is 80 MHz */
    int apb_clk_freq = esp_clk_apb_freq() / MHZ(1);
#if CONFIG_ESP_WIFI_ENABLED || CONFIG_BT_ENABLED || CONFIG_IEEE802154_ENABLED
    apb_clk_freq = MAX(apb_clk_freq, MODEM_REQUIRED_MIN_APB_CLK_FREQ / MHZ(1));
#endif
    int apb_max_freq = MIN(max_freq_mhz, apb_clk_freq); /* CPU frequency in APB_MAX mode */
#endif

    apb_max_freq = MAX(apb_max_freq, min_freq_mhz);

    ESP_LOGI(TAG, "Frequency switching config: "
                  "CPU_MAX: %d, APB_MAX: %d, APB_MIN: %d, Light sleep: %s",
                  max_freq_mhz,
                  apb_max_freq,
                  min_freq_mhz,
                  config->light_sleep_enable ? "ENABLED" : "DISABLED");

    ENTER_CRITICAL(&s_switch_lock);

    bool res __attribute__((unused));
    res = rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_CPU_MAX]);
    assert(res);
    res = rtc_clk_cpu_freq_mhz_to_config(apb_max_freq, &s_cpu_freq_by_mode[PM_MODE_APB_MAX]);
    assert(res);
    res = rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_APB_MIN]);
    assert(res);
    s_cpu_freq_by_mode[PM_MODE_LIGHT_SLEEP] = s_cpu_freq_by_mode[PM_MODE_APB_MIN];
    s_light_sleep_en = config->light_sleep_enable;
    s_config_changed = true;
    EXIT_CRITICAL(&s_switch_lock);

    esp_pm_sleep_configure(config);

    return ESP_OK;
}

esp_err_t esp_pm_get_configuration(void* vconfig)
{
    if (vconfig == NULL) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_pm_config_t* config = (esp_pm_config_t*) vconfig;

    ENTER_CRITICAL(&s_switch_lock);
    config->light_sleep_enable = s_light_sleep_en;
    config->max_freq_mhz = s_cpu_freq_by_mode[PM_MODE_CPU_MAX].freq_mhz;
    config->min_freq_mhz = s_cpu_freq_by_mode[PM_MODE_APB_MIN].freq_mhz;
    EXIT_CRITICAL(&s_switch_lock);

    return ESP_OK;
}

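/* Pick the lowest-power mode currently allowed: the highest locked mode wins,
 * and when no locks are held the result is APB_MIN, or LIGHT_SLEEP if automatic
 * light sleep has been enabled via esp_pm_configure.
 */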
static pm_mode_t IRAM_ATTR get_lowest_allowed_mode(void)
{
    /* TODO: optimize using ffs/clz */
    if (s_mode_mask >= BIT(PM_MODE_CPU_MAX)) {
        return PM_MODE_CPU_MAX;
    } else if (s_mode_mask >= BIT(PM_MODE_APB_MAX)) {
        return PM_MODE_APB_MAX;
    } else if (s_mode_mask >= BIT(PM_MODE_APB_MIN) || !s_light_sleep_en) {
        return PM_MODE_APB_MIN;
    } else {
        return PM_MODE_LIGHT_SLEEP;
    }
}

void IRAM_ATTR esp_pm_impl_switch_mode(pm_mode_t mode,
        pm_mode_switch_t lock_or_unlock, pm_time_t now)
{
    bool need_switch = false;
    uint32_t mode_mask = BIT(mode);
    ENTER_CRITICAL(&s_switch_lock);
    uint32_t count;
    if (lock_or_unlock == MODE_LOCK) {
        count = ++s_mode_lock_counts[mode];
    } else {
        count = s_mode_lock_counts[mode]--;
    }
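    /* count == 1 means either the first lock on this mode was just taken, or
     * (since the unlock path reads the counter before decrementing) the last
     * lock is being released, so the mode mask needs to be updated.
     */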
    if (count == 1) {
        if (lock_or_unlock == MODE_LOCK) {
            s_mode_mask |= mode_mask;
        } else {
            s_mode_mask &= ~mode_mask;
        }
        need_switch = true;
    }

    pm_mode_t new_mode = s_mode;
    if (need_switch) {
        new_mode = get_lowest_allowed_mode();
#ifdef WITH_PROFILING
        if (s_last_mode_change_time != 0) {
            pm_time_t diff = now - s_last_mode_change_time;
            s_time_in_mode[s_mode] += diff;
        }
        s_last_mode_change_time = now;
#endif // WITH_PROFILING
    }
    EXIT_CRITICAL(&s_switch_lock);
    if (need_switch) {
        do_switch(new_mode);
    }
}

/**
 * @brief Update clock dividers in esp_timer and adjust CCOMPARE
 * values on both CPUs.
 * @param old_ticks_per_us old CPU frequency
 * @param ticks_per_us new CPU frequency
 */
static void IRAM_ATTR on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us)
{
    uint32_t old_apb_ticks_per_us = MIN(old_ticks_per_us, 80);
    uint32_t apb_ticks_per_us = MIN(ticks_per_us, 80);
    /* Update APB frequency value used by the timer */
    if (old_apb_ticks_per_us != apb_ticks_per_us) {
        esp_timer_private_update_apb_freq(apb_ticks_per_us);
    }

#ifdef CONFIG_XTENSA_TIMER
#ifdef XT_RTOS_TIMER_INT
    /* Calculate new tick divisor */
    _xt_tick_divisor = ticks_per_us * MHZ(1) / XT_TICK_PER_SEC;
#endif

    int core_id = esp_cpu_get_core_id();
    if (s_rtos_lock_handle[core_id] != NULL) {
        ESP_PM_TRACE_ENTER(CCOMPARE_UPDATE, core_id);
        /* ccount_div and ccount_mul are used in esp_pm_impl_update_ccompare
         * to calculate new CCOMPARE value.
         */
        s_ccount_div = old_ticks_per_us;
        s_ccount_mul = ticks_per_us;

        /* Update CCOMPARE value on this CPU */
        update_ccompare();

#if CONFIG_MP_MAX_NUM_CPUS == 2
        /* Send interrupt to the other CPU to update CCOMPARE value */
        int other_core_id = (core_id == 0) ? 1 : 0;

        s_need_update_ccompare[other_core_id] = true;
        esp_crosscore_int_send_freq_switch(other_core_id);

        int timeout = 0;
        while (s_need_update_ccompare[other_core_id]) {
            if (++timeout == CCOMPARE_UPDATE_TIMEOUT) {
                assert(false && "failed to update CCOMPARE, possible deadlock");
            }
        }
#endif // CONFIG_MP_MAX_NUM_CPUS == 2

        s_ccount_mul = 0;
        s_ccount_div = 0;
        ESP_PM_TRACE_EXIT(CCOMPARE_UPDATE, core_id);
    }
#endif // CONFIG_XTENSA_TIMER
}

/**
 * Perform the switch to new power mode.
 * Currently only changes the CPU frequency and adjusts clock dividers.
 * No light sleep yet.
 * @param new_mode mode to switch to
 */
static void IRAM_ATTR do_switch(pm_mode_t new_mode)
{
    const int core_id = esp_cpu_get_core_id();

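    /* Wait for any switch that is already in progress (possibly started on the
     * other CPU) to complete. While waiting, acknowledge pending CCOMPARE update
     * requests so the other core is not left spinning.
     */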
    do {
        ENTER_CRITICAL(&s_switch_lock);
        if (!s_is_switching) {
            break;
        }
#ifdef CONFIG_XTENSA_TIMER
        if (s_need_update_ccompare[core_id]) {
            s_need_update_ccompare[core_id] = false;
        }
#endif
        EXIT_CRITICAL(&s_switch_lock);
    } while (true);
    if (new_mode == s_mode) {
        EXIT_CRITICAL(&s_switch_lock);
        return;
    }
    s_is_switching = true;
    bool config_changed = s_config_changed;
    s_config_changed = false;
    EXIT_CRITICAL(&s_switch_lock);

    rtc_cpu_freq_config_t new_config = s_cpu_freq_by_mode[new_mode];
    rtc_cpu_freq_config_t old_config;

    if (!config_changed) {
        old_config = s_cpu_freq_by_mode[s_mode];
    } else {
        rtc_clk_cpu_freq_get_config(&old_config);
    }

    if (new_config.freq_mhz != old_config.freq_mhz) {
        uint32_t old_ticks_per_us = old_config.freq_mhz;
        uint32_t new_ticks_per_us = new_config.freq_mhz;

        bool switch_down = new_ticks_per_us < old_ticks_per_us;

        ESP_PM_TRACE_ENTER(FREQ_SWITCH, core_id);
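        /* Update esp_timer dividers and CCOMPARE before lowering the frequency,
         * and only after raising it.
         */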
        if (switch_down) {
            on_freq_update(old_ticks_per_us, new_ticks_per_us);
        }
        if (new_config.source == SOC_CPU_CLK_SRC_PLL) {
            rtc_clk_cpu_freq_set_config_fast(&new_config);
#if SOC_SPI_MEM_SUPPORT_TIME_TUNING
            mspi_timing_change_speed_mode_cache_safe(false);
#endif
        } else {
#if SOC_SPI_MEM_SUPPORT_TIME_TUNING
            mspi_timing_change_speed_mode_cache_safe(true);
#endif
            rtc_clk_cpu_freq_set_config_fast(&new_config);
        }
        if (!switch_down) {
            on_freq_update(old_ticks_per_us, new_ticks_per_us);
        }
        ESP_PM_TRACE_EXIT(FREQ_SWITCH, core_id);
    }

    ENTER_CRITICAL(&s_switch_lock);
    s_mode = new_mode;
    s_is_switching = false;
    EXIT_CRITICAL(&s_switch_lock);
}

#ifdef CONFIG_XTENSA_TIMER
/**
 * @brief Calculate new CCOMPARE value based on s_ccount_{mul,div}
 *
 * Adjusts CCOMPARE value so that the interrupt happens at the same time as it
 * would happen without the frequency change.
 * Assumes that the new_frequency = old_frequency * s_ccount_mul / s_ccount_div.
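 * For example, when switching from 160 MHz to 80 MHz, s_ccount_mul = 80 and
 * s_ccount_div = 160, so a remaining (CCOMPARE - CCOUNT) of 10000 cycles is
 * rescaled to 5000 cycles, keeping the wall-clock time of the next tick the same.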
 */
static void IRAM_ATTR update_ccompare(void)
{
#if CONFIG_PM_UPDATE_CCOMPARE_HLI_WORKAROUND
    /* disable level 4 and below */
    uint32_t irq_status = XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2);
#endif
    uint32_t ccount = esp_cpu_get_cycle_count();
    uint32_t ccompare = XTHAL_GET_CCOMPARE(XT_TIMER_INDEX);
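    /* Wrap-around-safe check: only adjust if CCOMPARE is at least
     * CCOMPARE_MIN_CYCLES_IN_FUTURE cycles ahead of the current CCOUNT.
     */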
    if ((ccompare - CCOMPARE_MIN_CYCLES_IN_FUTURE) - ccount < UINT32_MAX / 2) {
        uint32_t diff = ccompare - ccount;
        uint32_t diff_scaled = (diff * s_ccount_mul + s_ccount_div - 1) / s_ccount_div;
        if (diff_scaled < _xt_tick_divisor) {
            uint32_t new_ccompare = ccount + diff_scaled;
            XTHAL_SET_CCOMPARE(XT_TIMER_INDEX, new_ccompare);
        }
    }
#if CONFIG_PM_UPDATE_CCOMPARE_HLI_WORKAROUND
    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
#endif // CONFIG_XTENSA_TIMER

static void IRAM_ATTR leave_idle(void)
{
    int core_id = esp_cpu_get_core_id();
    if (s_core_idle[core_id]) {
        // TODO: possible optimization: raise frequency here first
        esp_pm_lock_acquire(s_rtos_lock_handle[core_id]);
        s_core_idle[core_id] = false;
    }
}

#ifdef WITH_PROFILING
void esp_pm_impl_dump_stats(FILE* out)
{
    pm_time_t time_in_mode[PM_MODE_COUNT];

    ENTER_CRITICAL(&s_switch_lock);
    memcpy(time_in_mode, s_time_in_mode, sizeof(time_in_mode));
    pm_time_t last_mode_change_time = s_last_mode_change_time;
    pm_mode_t cur_mode = s_mode;
    pm_time_t now = pm_get_time();
    bool light_sleep_en = s_light_sleep_en;
    uint32_t light_sleep_counts = s_light_sleep_counts;
    uint32_t light_sleep_reject_counts = s_light_sleep_reject_counts;
    EXIT_CRITICAL(&s_switch_lock);

    time_in_mode[cur_mode] += now - last_mode_change_time;

    fprintf(out, "\nMode stats:\n");
    fprintf(out, "%-8s  %-10s  %-10s  %-10s\n", "Mode", "CPU_freq", "Time(us)", "Time(%)");
    for (int i = 0; i < PM_MODE_COUNT; ++i) {
        if (i == PM_MODE_LIGHT_SLEEP && !light_sleep_en) {
            /* don't display light sleep mode if it's not enabled */
            continue;
        }
        fprintf(out, "%-8s  %-3"PRIu32"M%-7s %-10lld  %-2d%%\n",
                s_mode_names[i],
                s_cpu_freq_by_mode[i].freq_mhz,
                "",                                     // Empty space to align columns
                time_in_mode[i],
                (int) (time_in_mode[i] * 100 / now));
    }
    if (light_sleep_en) {
        fprintf(out, "\nSleep stats:\n");
        fprintf(out, "light_sleep_counts:%" PRIu32 "  light_sleep_reject_counts:%" PRIu32 "\n", light_sleep_counts, light_sleep_reject_counts);
    }
}
#endif // WITH_PROFILING

int esp_pm_impl_get_cpu_freq(pm_mode_t mode)
{
    int freq_mhz;
    if (mode >= PM_MODE_LIGHT_SLEEP && mode < PM_MODE_COUNT) {
        ENTER_CRITICAL(&s_switch_lock);
        freq_mhz = s_cpu_freq_by_mode[mode].freq_mhz;
        EXIT_CRITICAL(&s_switch_lock);
    } else {
        abort();
    }
    return freq_mhz;
}

void esp_pm_impl_init(void)
{
#if defined(CONFIG_ESP_CONSOLE_UART)
    // This clock source should be a source which won't be affected by DFS
    uart_sclk_t clk_source = UART_SCLK_DEFAULT;
#if SOC_UART_SUPPORT_REF_TICK
    clk_source = UART_SCLK_REF_TICK;
#elif SOC_UART_SUPPORT_XTAL_CLK
    clk_source = UART_SCLK_XTAL;
#else
    #error "No UART clock source is aware of DFS"
#endif // SOC_UART_SUPPORT_xxx
    while (!uart_ll_is_tx_idle(UART_LL_GET_HW(CONFIG_ESP_CONSOLE_UART_NUM)));
    /* When DFS is enabled, override the system setting and use a DFS-independent
     * clock (REF_TICK or XTAL) as the UART clock source */
    uart_ll_set_sclk(UART_LL_GET_HW(CONFIG_ESP_CONSOLE_UART_NUM), clk_source);

    uint32_t sclk_freq;
    esp_err_t err = esp_clk_tree_src_get_freq_hz((soc_module_clk_t)clk_source,
            ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &sclk_freq);

    if (err != ESP_OK) {
        ESP_LOGI(TAG, "could not get UART clock frequency");
        return;
    }

    uart_ll_set_baudrate(UART_LL_GET_HW(CONFIG_ESP_CONSOLE_UART_NUM), CONFIG_ESP_CONSOLE_UART_BAUDRATE, sclk_freq);
#endif // CONFIG_ESP_CONSOLE_UART

#ifdef CONFIG_PM_TRACE
    esp_pm_trace_init();
#endif

    ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos0",
            &s_rtos_lock_handle[0]));
    ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[0]));

#if CONFIG_MP_MAX_NUM_CPUS == 2
    ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos1",
            &s_rtos_lock_handle[1]));
    ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[1]));
#endif // CONFIG_MP_MAX_NUM_CPUS == 2

    /* Configure all modes to use the default CPU frequency.
     * This will be modified later by a call to esp_pm_configure.
     */
    rtc_cpu_freq_config_t default_config;
    if (!rtc_clk_cpu_freq_mhz_to_config(CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ, &default_config)) {
        assert(false && "unsupported frequency");
    }
    for (size_t i = 0; i < PM_MODE_COUNT; ++i) {
        s_cpu_freq_by_mode[i] = default_config;
    }

#ifdef CONFIG_PM_DFS_INIT_AUTO
    int xtal_freq_mhz = esp_clk_xtal_freq() / MHZ(1);
    esp_pm_config_t cfg = {
        .max_freq_mhz = CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ,
        .min_freq_mhz = xtal_freq_mhz,
    };

    esp_pm_configure(&cfg);
#endif // CONFIG_PM_DFS_INIT_AUTO
}

void esp_pm_impl_idle_hook(void)
{
    int core_id = esp_cpu_get_core_id();

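    /* Keep the scheduler locked while this core releases its CPU_FREQ_MAX lock
     * and marks itself idle.
     */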
    k_sched_lock();
    ENTER_CRITICAL(&s_switch_lock);
    if (!s_core_idle[core_id]) {
        esp_pm_lock_release(s_rtos_lock_handle[core_id]);
        s_core_idle[core_id] = true;
    }
    EXIT_CRITICAL(&s_switch_lock);
    k_sched_unlock();

    ESP_PM_TRACE_ENTER(IDLE, core_id);
}

void IRAM_ATTR esp_pm_impl_isr_hook(void)
{
    int core_id = esp_cpu_get_core_id();
    ESP_PM_TRACE_ENTER(ISR_HOOK, core_id);
    /* Prevent higher level interrupts (than the one this function was called from)
     * from happening in this section, since they will also call into esp_pm_impl_isr_hook.
     */
    ENTER_CRITICAL(&s_switch_lock);
#if defined(CONFIG_XTENSA_TIMER) && (CONFIG_MP_MAX_NUM_CPUS == 2)
    if (s_need_update_ccompare[core_id]) {
        update_ccompare();
        s_need_update_ccompare[core_id] = false;
    } else {
        leave_idle();
    }
#else
    leave_idle();
#endif // CONFIG_XTENSA_TIMER && CONFIG_MP_MAX_NUM_CPUS == 2
    EXIT_CRITICAL(&s_switch_lock);
    ESP_PM_TRACE_EXIT(ISR_HOOK, core_id);
}