/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "esp_debug_helpers.h"
#include "soc/periph_defs.h"

#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"

#if CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#include "esp_gdbstub.h"
#endif

#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
#include "soc/dport_reg.h"
#else
#include "soc/system_reg.h"
#endif

#define REASON_YIELD            BIT(0)
#define REASON_FREQ_SWITCH      BIT(1)
#define REASON_GDB_CALL         BIT(3)

#if CONFIG_IDF_TARGET_ARCH_XTENSA
#define REASON_PRINT_BACKTRACE  BIT(2)
#define REASON_TWDT_ABORT       BIT(4)
#endif

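// Per-core bitmask of pending cross-core interrupt reasons (REASON_* bits). Writes are
// protected by reason_spinlock; the ISR on the target core reads and clears its own entry.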
static portMUX_TYPE reason_spinlock = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t reason[portNUM_PROCESSORS];

/*
ToDo: There is a small chance the CPU has already yielded by the time this ISR is serviced. In that case, it is already running the
intended task, but the ISR will cause it to switch _away_ from that task. portYIELD_FROM_ISR will probably just schedule the task
again, but this still needs to be verified.
*/
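// Handle REASON_YIELD: request a context switch on this core when the ISR returns.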
static inline void IRAM_ATTR esp_crosscore_isr_handle_yield(void)
{
    portYIELD_FROM_ISR();
}

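// Cross-core interrupt handler. Each core installs this ISR for its own FROM_CPU interrupt
// source; 'arg' points to this core's entry in the reason[] array. The handler acknowledges
// the interrupt, atomically reads and clears the pending reason bits, and then dispatches
// on every bit that was set.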
static void IRAM_ATTR esp_crosscore_isr(void *arg) {
    uint32_t my_reason_val;
    //A pointer to the correct reason array item is passed to this ISR.
    volatile uint32_t *my_reason=arg;

    //Clear the interrupt first.
#if CONFIG_IDF_TARGET_ESP32
    if (esp_cpu_get_core_id()==0) {
        DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
    } else {
        DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
    }
#elif CONFIG_IDF_TARGET_ESP32S2
    DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
#elif CONFIG_IDF_TARGET_ESP32S3
    if (esp_cpu_get_core_id()==0) {
        WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0);
    } else {
        WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0);
    }
#elif CONFIG_IDF_TARGET_ARCH_RISCV
    WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0);
#endif

    //Grab the reason and clear it.
    portENTER_CRITICAL_ISR(&reason_spinlock);
    my_reason_val=*my_reason;
    *my_reason=0;
    portEXIT_CRITICAL_ISR(&reason_spinlock);

    //Check what we need to do.
    if (my_reason_val & REASON_YIELD) {
        esp_crosscore_isr_handle_yield();
    }
    if (my_reason_val & REASON_FREQ_SWITCH) {
        /* Nothing to do here; the frequency switch event was already
         * handled by a hook in xtensa_vectors.S. Could be used in the future
         * to allow DFS features without the extra latency of the ISR hook.
         */
    }
#if CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    if (my_reason_val & REASON_GDB_CALL) {
        update_breakpoints();
    }
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
#if CONFIG_IDF_TARGET_ARCH_XTENSA // IDF-2986
    if (my_reason_val & REASON_PRINT_BACKTRACE) {
        esp_backtrace_print(100);
    }

#if CONFIG_ESP_TASK_WDT_EN
    if (my_reason_val & REASON_TWDT_ABORT) {
        extern void task_wdt_timeout_abort_xtensa(bool);
        /* We are handling a crosscore interrupt, so we are not the core that received
         * the TWDT interrupt; call the function with `false` as the parameter. */
        task_wdt_timeout_abort_xtensa(false);
    }
#endif // CONFIG_ESP_TASK_WDT_EN
#endif // CONFIG_IDF_TARGET_ARCH_XTENSA
}

//Initialize the crosscore interrupt on this core. Call this once
//on each active core.
void esp_crosscore_int_init(void) {
    portENTER_CRITICAL(&reason_spinlock);
    reason[esp_cpu_get_core_id()]=0;
    portEXIT_CRITICAL(&reason_spinlock);
    esp_err_t err __attribute__((unused)) = ESP_OK;
#if portNUM_PROCESSORS > 1
    if (esp_cpu_get_core_id()==0) {
        err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[0], NULL);
    } else {
        err = esp_intr_alloc(ETS_FROM_CPU_INTR1_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[1], NULL);
    }
#else
    err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[0], NULL);
#endif
    ESP_ERROR_CHECK(err);
}

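// Internal helper: record reason_mask in the target core's reason[] entry (under the
// spinlock), then trigger that core's FROM_CPU interrupt so esp_crosscore_isr runs there.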
static void IRAM_ATTR esp_crosscore_int_send(int core_id, uint32_t reason_mask) {
    assert(core_id<portNUM_PROCESSORS);
    //Mark the reason we interrupt the other CPU
    portENTER_CRITICAL_ISR(&reason_spinlock);
    reason[core_id] |= reason_mask;
    portEXIT_CRITICAL_ISR(&reason_spinlock);
    //Poke the other CPU.
#if CONFIG_IDF_TARGET_ESP32
    if (core_id==0) {
        DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
    } else {
        DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
    }
#elif CONFIG_IDF_TARGET_ESP32S2
    DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
#elif CONFIG_IDF_TARGET_ESP32S3
    if (core_id==0) {
        WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0);
    } else {
        WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, SYSTEM_CPU_INTR_FROM_CPU_1);
    }
#elif CONFIG_IDF_TARGET_ARCH_RISCV
    WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0);
#endif
}

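// Request that the given core yield to another runnable task as soon as possible.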
void IRAM_ATTR esp_crosscore_int_send_yield(int core_id)
{
    esp_crosscore_int_send(core_id, REASON_YIELD);
}

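// Notify the given core of a CPU frequency switch; the actual handling is done by a hook
// in xtensa_vectors.S (see the REASON_FREQ_SWITCH case in esp_crosscore_isr above).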
void IRAM_ATTR esp_crosscore_int_send_freq_switch(int core_id)
{
    esp_crosscore_int_send(core_id, REASON_FREQ_SWITCH);
}

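// Ask the given core to re-apply its breakpoint/watchpoint configuration for the runtime
// GDB stub (handled via update_breakpoints() in esp_crosscore_isr).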
void IRAM_ATTR esp_crosscore_int_send_gdb_call(int core_id)
{
    esp_crosscore_int_send(core_id, REASON_GDB_CALL);
}

#if CONFIG_IDF_TARGET_ARCH_XTENSA
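// Ask the given core to print a backtrace of the code it is currently executing.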
void IRAM_ATTR esp_crosscore_int_send_print_backtrace(int core_id)
{
    esp_crosscore_int_send(core_id, REASON_PRINT_BACKTRACE);
}

#if CONFIG_ESP_TASK_WDT_EN
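// Ask the given core to abort due to a Task Watchdog timeout; sent by the core that
// received the TWDT interrupt when the offending task was running on the other core.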
void IRAM_ATTR esp_crosscore_int_send_twdt_abort(int core_id) {
    esp_crosscore_int_send(core_id, REASON_TWDT_ABORT);
}
#endif // CONFIG_ESP_TASK_WDT_EN
#endif // CONFIG_IDF_TARGET_ARCH_XTENSA