/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_ipc.h"
#include "esp_private/esp_ipc_isr.h"
#include "esp_attr.h"
#include "esp_cpu.h"

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"

#define IPC_MAX_PRIORITY (configMAX_PRIORITIES - 1)

#if !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)

#if CONFIG_COMPILER_OPTIMIZATION_NONE
#define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE + 0x100)
#else
#define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE)
#endif //CONFIG_COMPILER_OPTIMIZATION_NONE

static DRAM_ATTR StaticSemaphore_t s_ipc_mutex_buffer[portNUM_PROCESSORS];
static DRAM_ATTR StaticSemaphore_t s_ipc_ack_buffer[portNUM_PROCESSORS];

static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS];    // This mutex is used as a global lock for the esp_ipc_* APIs
static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS];      // Semaphore used to acknowledge that the callback has started (or finished) running
static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS] = { 0 };   // Function which should be called by the high-priority IPC task
static void * volatile s_func_arg[portNUM_PROCESSORS];       // Argument to pass into s_func
typedef enum {
    IPC_WAIT_NO = 0,
    IPC_WAIT_FOR_START,
    IPC_WAIT_FOR_END,
} esp_ipc_wait_t;

static esp_ipc_wait_t volatile s_wait_for[portNUM_PROCESSORS];

static volatile esp_ipc_func_t s_no_block_func[portNUM_PROCESSORS] = { 0 };
static volatile bool s_no_block_func_and_arg_are_ready[portNUM_PROCESSORS] = { 0 };
static void * volatile s_no_block_func_arg[portNUM_PROCESSORS];

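// High-priority task pinned to each core. It sleeps on a task notification and,
// once woken, runs the pending non-blocking callback (if any) and then, on multi-core
// builds, the pending esp_ipc_call/esp_ipc_call_blocking callback, acknowledging the
// caller through s_ipc_ack.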
static void IRAM_ATTR ipc_task(void* arg)
{
    const int cpuid = (int) arg;

    assert(cpuid == xPortGetCoreID());
#ifdef CONFIG_ESP_IPC_ISR_ENABLE
    esp_ipc_isr_init();
#endif

    while (true) {
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);

        if (s_no_block_func_and_arg_are_ready[cpuid] && s_no_block_func[cpuid]) {
            (*s_no_block_func[cpuid])(s_no_block_func_arg[cpuid]);
            s_no_block_func_and_arg_are_ready[cpuid] = false;
            s_no_block_func[cpuid] = NULL;
        }

#ifndef CONFIG_FREERTOS_UNICORE
        if (s_func[cpuid]) {
            // Cache s_func, s_func_arg, s_wait_for and s_ipc_ack locally, because once the
            // caller is released (via xSemaphoreGive(ipc_ack)) they can be overwritten by a subsequent IPC call.
            esp_ipc_func_t func = s_func[cpuid];
            void* func_arg = s_func_arg[cpuid];
            esp_ipc_wait_t ipc_wait = s_wait_for[cpuid];
            SemaphoreHandle_t ipc_ack = s_ipc_ack[cpuid];
            s_func[cpuid] = NULL;

            if (ipc_wait == IPC_WAIT_FOR_START) {
                xSemaphoreGive(ipc_ack);
                (*func)(func_arg);
            } else if (ipc_wait == IPC_WAIT_FOR_END) {
                (*func)(func_arg);
                xSemaphoreGive(ipc_ack);
            } else {
                abort();
            }
        }
#endif // !CONFIG_FREERTOS_UNICORE
    }
    // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
    // function which will signal to both tasks that they can shut down.
    // Not critical at this point, we don't have a use case for stopping
    // IPC yet.
    // Also need to delete the semaphore here.
    vTaskDelete(NULL);
}

/*
 * Initialize the inter-processor call module. This function is called automatically
 * on CPU start and should not be called from the application.
 *
 * This function starts one high-priority task on each CPU. These tasks are
 * normally inactive, waiting until one of the esp_ipc_call_* functions is used.
 * One of these tasks will be woken up to execute the callback provided to
 * esp_ipc_call, esp_ipc_call_blocking, or esp_ipc_call_nonblocking.
 * (See the usage sketch after esp_ipc_init() below.)
 */
static void esp_ipc_init(void) __attribute__((constructor));

static void esp_ipc_init(void)
{
    char task_name[] = "ipcX"; // up to 10 ipc tasks/cores (0-9)

    for (int i = 0; i < portNUM_PROCESSORS; ++i) {
        task_name[3] = i + (char)'0';
        s_ipc_mutex[i] = xSemaphoreCreateMutexStatic(&s_ipc_mutex_buffer[i]);
        s_ipc_ack[i] = xSemaphoreCreateBinaryStatic(&s_ipc_ack_buffer[i]);
        portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, IPC_STACK_SIZE, (void*) i,
                                                    IPC_MAX_PRIORITY, &s_ipc_task_handle[i], i);
        assert(res == pdTRUE);
        (void)res;
    }
}
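
/*
 * Illustrative usage sketch (not part of this module): it shows how a caller
 * might use esp_ipc_call_blocking(); the callback and variable names below are
 * hypothetical and chosen for the example only.
 *
 *   static void set_flag(void* arg)   // runs in the IPC task on the target CPU
 *   {
 *       *(volatile bool*)arg = true;
 *   }
 *
 *   void example_caller(void)
 *   {
 *       static volatile bool flag = false;
 *       // Returns only after set_flag() has finished running on CPU 1.
 *       ESP_ERROR_CHECK(esp_ipc_call_blocking(1, set_flag, (void*)&flag));
 *       assert(flag == true);
 *   }
 */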
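// Common implementation behind esp_ipc_call() and esp_ipc_call_blocking(): hands
// func/arg over to the IPC task on cpu_id and blocks on s_ipc_ack until the callback
// has started (IPC_WAIT_FOR_START) or finished (IPC_WAIT_FOR_END) running.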
static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
{
    if (cpu_id >= portNUM_PROCESSORS) {
        return ESP_ERR_INVALID_ARG;
    }
    if (s_ipc_task_handle[cpu_id] == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
    UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
    UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
    if (priority_of_running_ipc_task < priority_of_current_task) {
        vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
    }

    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
#else
    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
#endif

    s_func_arg[cpu_id] = arg;
    s_wait_for[cpu_id] = wait_for;
    // s_func must be set after all other parameters. ipc_task uses it as the indicator that the IPC request is fully prepared.
    s_func[cpu_id] = func;
    xTaskNotifyGive(s_ipc_task_handle[cpu_id]);
    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
    xSemaphoreGive(s_ipc_mutex[0]);
#endif
    return ESP_OK;
}

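// Runs 'func' with 'arg' on the given CPU; returns as soon as the callback has started executing.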
esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_START);
}

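// Runs 'func' with 'arg' on the given CPU; returns only after the callback has finished executing.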
esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END);
}

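// Queues 'func' to run on the given CPU without waiting for it to start or finish.
// May be called from an ISR or with the scheduler suspended. Fails with ESP_FAIL if
// a previous non-blocking call on that CPU has not completed yet.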
esp_err_t esp_ipc_call_nonblocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    if (cpu_id >= portNUM_PROCESSORS || s_ipc_task_handle[cpu_id] == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    if (cpu_id == xPortGetCoreID() && xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }

    // Since this function can be called from an interrupt or while the scheduler is suspended, it cannot wait for a mutex to be released.
    if (esp_cpu_compare_and_set((volatile uint32_t *)&s_no_block_func[cpu_id], 0, (uint32_t)func)) {
        s_no_block_func_arg[cpu_id] = arg;
        s_no_block_func_and_arg_are_ready[cpu_id] = true;

        if (xPortInIsrContext()) {
            vTaskNotifyGiveFromISR(s_ipc_task_handle[cpu_id], NULL);
        } else {
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
            vTaskPrioritySet(s_ipc_task_handle[cpu_id], IPC_MAX_PRIORITY);
#endif
            xTaskNotifyGive(s_ipc_task_handle[cpu_id]);
        }
        return ESP_OK;
    }

    // the previous call was not completed
    return ESP_FAIL;
}

#endif // !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)