/*
 * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"

// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
#include "soc/lp_aon_reg.h"
#include "soc/pcr_reg.h"
#define SYSTEM_CPU_PER_CONF_REG PCR_CPU_WAITI_CONF_REG
#define SYSTEM_CPU_WAIT_MODE_FORCE_ON PCR_CPU_WAIT_MODE_FORCE_ON
#else
#include "soc/rtc_cntl_reg.h"
#endif

#include "hal/soc_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For Dport access
#include "riscv/semihosting.h"
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif


/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
    "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
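    /* Note on the split (derived from the register writes below): "c1" holds the upper six bits (0x21) and "c0"
       holds the lower two bits (0x2) of the stall code, i.e. (0x21 << 2) | 0x2 == 0x86. */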
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}

void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
    two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
    its own pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
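
/*
 * Illustrative usage (a sketch, not part of this file): the typical pattern is to stall the other core while
 * performing an operation it must not interfere with (e.g. a flash write), then unstall it afterwards:
 *
 *     int other_core = !esp_cpu_get_core_id();
 *     esp_cpu_stall(other_core);
 *     // ... perform the operation the other core must not disturb ...
 *     esp_cpu_unstall(other_core);
 */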

void esp_cpu_reset(int core_id)
{
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 // TODO: IDF-5645
    SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
#else
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
#endif
}

void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    // TODO: IDF-5645 (better to implement with an LL function). The C6/H2 register names are mapped in the #include section at the top.
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA access to memory does not work
           while the CPU is in WFI mode, so do not enter WFI when a debugger is connected. */
        return;
    }
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}
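
/*
 * Illustrative usage (a sketch): park the CPU until the next interrupt instead of busy-waiting, e.g. from an
 * idle loop. work_pending() is a hypothetical predicate, not an IDF API:
 *
 *     while (!work_pending()) {
 *         esp_cpu_wait_for_intr();
 *     }
 */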

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* To set a breakpoint that transfers control to the debugger when hit,
         * the `action` field in `mcontrol` must be set to 1 (Enter Debug Mode).
         * That `action` value is only supported when the `dmode` bit of `tdata1` is set,
         * but `dmode` can only be modified by the debugger (from Debug Mode).
         *
         * So when a debugger is connected, we use a special semihosting call to ask it to set the breakpoint for us.
         */
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
    }
#endif // __XTENSA__
    return ESP_OK;
}
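
/*
 * Illustrative usage (a sketch): install hardware breakpoint 0 at the entry of a function and remove it later.
 * my_func is a hypothetical function, not an IDF symbol:
 *
 *     extern void my_func(void);
 *     ESP_ERROR_CHECK(esp_cpu_set_breakpoint(0, (const void *)&my_func));
 *     // ... hitting the breakpoint raises a debug exception, or transfers control to an attached debugger ...
 *     ESP_ERROR_CHECK(esp_cpu_clear_breakpoint(0));
 */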

esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_clear_breakpoint(bp_num);
    }
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0

#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check if the wp_num is already in use
    */
    if (wp_num < 0 || wp_num >= SOC_CPU_WATCHPOINTS_NUM) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check that size is a power of two and within the range [1 ... SOC_CPU_WATCHPOINT_MAX_REGION_SIZE]
    if (size < 1 || size > SOC_CPU_WATCHPOINT_MAX_REGION_SIZE || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check that the watched region's start address is naturally aligned to the size of the region
    // (size has already been validated above, so the modulo cannot divide by zero)
    if ((uint32_t)wp_addr % size) {
        return ESP_ERR_INVALID_ARG;
    }
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
    }
#endif // __XTENSA__
    return ESP_OK;
}
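
/*
 * Illustrative usage (a sketch): monitor a 4-byte variable for writes with watchpoint 0. The variable's
 * address is naturally aligned to its size, satisfying the checks above. s_guarded_var is hypothetical:
 *
 *     static uint32_t s_guarded_var;
 *     ESP_ERROR_CHECK(esp_cpu_set_watchpoint(0, &s_guarded_var, sizeof(s_guarded_var),
 *                                            ESP_CPU_WATCHPOINT_STORE));
 */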

esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_clear_watchpoint(wp_num);
    }
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif

bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
        atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Re-enable interrupts
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif  // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __XTENSA__
    // Single-core targets don't have an atomic CAS instruction, so the access method is the same for internal and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}
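
/*
 * Illustrative usage (a sketch): a minimal spin-wait lock built on this compare-and-set primitive, assuming a
 * lock word that holds 0 when free and 1 when taken (s_lock is hypothetical):
 *
 *     static volatile uint32_t s_lock = 0;
 *     while (!esp_cpu_compare_and_set(&s_lock, 0, 1)) {
 *         // spin until the lock word is atomically changed from 0 to 1
 *     }
 *     // ... critical section ...
 *     s_lock = 0;     // release the lock
 */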