1 /*
2 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stddef.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <assert.h>
11 #include "esp_err.h"
12 #include "esp_ipc.h"
13 #include "esp_private/esp_ipc_isr.h"
14 #include "esp_attr.h"
15
16 #include "freertos/FreeRTOS.h"
17 #include "freertos/task.h"
18 #include "freertos/semphr.h"
19
20 #if !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
21
22 #if CONFIG_COMPILER_OPTIMIZATION_NONE
23 #define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE + 0x100)
24 #else
25 #define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE)
26 #endif //CONFIG_COMPILER_OPTIMIZATION_NONE
27
28 static DRAM_ATTR StaticSemaphore_t s_ipc_mutex_buffer[portNUM_PROCESSORS];
29 static DRAM_ATTR StaticSemaphore_t s_ipc_ack_buffer[portNUM_PROCESSORS];
30
31 static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
32 static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS]; // This mutex is used as a global lock for esp_ipc_* APIs
static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS];    // Semaphore used to acknowledge that the IPC task was woken up and the call has started (or finished, depending on the wait mode)
34 static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS] = { 0 }; // Function which should be called by high priority task
35 static void * volatile s_func_arg[portNUM_PROCESSORS]; // Argument to pass into s_func
36 typedef enum {
37 IPC_WAIT_NO = 0,
38 IPC_WAIT_FOR_START,
39 IPC_WAIT_FOR_END,
40 } esp_ipc_wait_t;
41
42 #if CONFIG_APPTRACE_GCOV_ENABLE
43 static volatile esp_ipc_func_t s_gcov_func = NULL; // Gcov dump starter function which should be called by high priority task
44 static void * volatile s_gcov_func_arg; // Argument to pass into s_gcov_func
45 #endif
46
/**
 * High-priority per-core task that executes IPC callbacks on behalf of callers
 * running on the other core. One instance runs pinned to each core; it sleeps
 * on a task notification whose value carries the requested wait mode
 * (esp_ipc_wait_t), or IPC_WAIT_NO for gcov-only wakeups.
 *
 * @param arg Core ID this task is pinned to, passed as a casted integer.
 */
static void IRAM_ATTR ipc_task(void* arg)
{
    const int cpuid = (int) arg;

    // The task must be running on the core it was created for.
    assert(cpuid == xPortGetCoreID());
#ifdef CONFIG_ESP_IPC_ISR_ENABLE
    esp_ipc_isr_init();
#endif

    while (true) {
        uint32_t ipc_wait;
        // Block until a caller (or the gcov ISR path) notifies this task.
        // The notification value is consumed (cleared) on exit.
        xTaskNotifyWait(0, ULONG_MAX, &ipc_wait, portMAX_DELAY);

#if CONFIG_APPTRACE_GCOV_ENABLE
        if (s_gcov_func) {
            (*s_gcov_func)(s_gcov_func_arg);
            s_gcov_func = NULL;
            /* we can not interfere with IPC calls so no need for further processing */
            // esp_ipc API and gcov_from_isr APIs can be processed together if they came at the same time
            if (ipc_wait == IPC_WAIT_NO) {
                continue;
            }
        }
#endif // CONFIG_APPTRACE_GCOV_ENABLE

#ifndef CONFIG_FREERTOS_UNICORE
        if (s_func[cpuid]) {
            // we need to cache s_func, s_func_arg and ipc_ack variables locally
            // because they can be changed by a subsequent IPC call (after xTaskNotify(caller_task_handle)).
            esp_ipc_func_t func = s_func[cpuid];
            s_func[cpuid] = NULL;
            void* func_arg = s_func_arg[cpuid];
            SemaphoreHandle_t ipc_ack = s_ipc_ack[cpuid];

            if (ipc_wait == IPC_WAIT_FOR_START) {
                // Acknowledge before running: the caller only waits for the
                // callback to have started.
                xSemaphoreGive(ipc_ack);
                (*func)(func_arg);
            } else if (ipc_wait == IPC_WAIT_FOR_END) {
                // Acknowledge after running: the caller blocks until the
                // callback has fully completed.
                (*func)(func_arg);
                xSemaphoreGive(ipc_ack);
            } else {
                // Unknown wait mode — indicates memory corruption or a logic bug.
                abort();
            }
        }
#endif // !CONFIG_FREERTOS_UNICORE
    }
    // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
    // function which will signal to both tasks that they can shut down.
    // Not critical at this point, we don't have a use case for stopping
    // IPC yet.
    // Also need to delete the semaphore here.
    vTaskDelete(NULL);
}
100
101 /*
102 * Initialize inter-processor call module. This function is called automatically
103 * on CPU start and should not be called from the application.
104 *
105 * This function start two tasks, one on each CPU. These tasks are started
106 * with high priority. These tasks are normally inactive, waiting until one of
107 * the esp_ipc_call_* functions to be used. One of these tasks will be
108 * woken up to execute the callback provided to esp_ipc_call_nonblocking or
109 * esp_ipc_call_blocking.
110 */
111 static void esp_ipc_init(void) __attribute__((constructor));
112
esp_ipc_init(void)113 static void esp_ipc_init(void)
114 {
115 char task_name[] = "ipcX"; // up to 10 ipc tasks/cores (0-9)
116
117 for (int i = 0; i < portNUM_PROCESSORS; ++i) {
118 task_name[3] = i + (char)'0';
119 s_ipc_mutex[i] = xSemaphoreCreateMutexStatic(&s_ipc_mutex_buffer[i]);
120 s_ipc_ack[i] = xSemaphoreCreateBinaryStatic(&s_ipc_ack_buffer[i]);
121 portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, IPC_STACK_SIZE, (void*) i,
122 configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
123 assert(res == pdTRUE);
124 (void)res;
125 }
126 }
127
/**
 * Common implementation for esp_ipc_call / esp_ipc_call_blocking: run func(arg)
 * on the IPC task of the given core and wait according to wait_for
 * (IPC_WAIT_FOR_START: return once the callback has begun;
 *  IPC_WAIT_FOR_END: return once it has finished).
 *
 * @param cpu_id   Target core (must be < portNUM_PROCESSORS).
 * @param func     Callback to execute on the target core's IPC task.
 * @param arg      Opaque argument passed to func.
 * @param wait_for Wait mode, forwarded as the task-notification value.
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG for a bad cpu_id;
 *         ESP_ERR_INVALID_STATE if IPC is not initialized or the scheduler
 *         is not running.
 */
static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
{
    if (cpu_id >= portNUM_PROCESSORS) {
        return ESP_ERR_INVALID_ARG;
    }
    if (s_ipc_task_handle[cpu_id] == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
    UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
    UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
    // Raise the IPC task before taking the mutex so a currently-running,
    // lower-priority IPC call cannot hold us up (priority-inversion guard).
    if (priority_of_running_ipc_task < priority_of_current_task) {
        vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
    }

    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
    // Now that we own the slot, pin the IPC task to exactly our priority
    // for the duration of this call.
    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
#else
    // Without caller-priority mode a single global lock (index 0) serializes
    // all IPC calls on all cores.
    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
#endif

    // Publish the request, wake the target core's IPC task with the wait mode
    // as notification value, then block until it acknowledges.
    s_func[cpu_id] = func;
    s_func_arg[cpu_id] = arg;
    xTaskNotify(s_ipc_task_handle[cpu_id], wait_for, eSetValueWithOverwrite);
    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
    xSemaphoreGive(s_ipc_mutex[0]);
#endif
    return ESP_OK;
}
166
esp_ipc_call(uint32_t cpu_id,esp_ipc_func_t func,void * arg)167 esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
168 {
169 return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_START);
170 }
171
esp_ipc_call_blocking(uint32_t cpu_id,esp_ipc_func_t func,void * arg)172 esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
173 {
174 return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END);
175 }
176
177 // currently this is only called from gcov component
178 // the top level guarantees that the next call will be only after the previous one has completed
179 #if CONFIG_APPTRACE_GCOV_ENABLE
esp_ipc_start_gcov_from_isr(uint32_t cpu_id,esp_ipc_func_t func,void * arg)180 esp_err_t esp_ipc_start_gcov_from_isr(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
181 {
182 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
183 return ESP_ERR_INVALID_STATE;
184 }
185
186 // Since it is called from an interrupt, it can not wait for a mutex to be released.
187 if (s_gcov_func == NULL) {
188 s_gcov_func_arg = arg;
189 s_gcov_func = func;
190
191 // If the target task already has a notification pending then its notification value is not updated (WithoutOverwrite).
192 xTaskNotifyFromISR(s_ipc_task_handle[cpu_id], IPC_WAIT_NO, eSetValueWithoutOverwrite, NULL);
193 return ESP_OK;
194 }
195
196 // the previous call was not completed
197 return ESP_FAIL;
198 }
199 #endif // CONFIG_APPTRACE_GCOV_ENABLE
200
201 #endif // !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
202