/*
 * Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>
#include "pico/async_context_freertos.h"
#include "pico/async_context_base.h"
#include "pico/sync.h"
#include "hardware/irq.h"

#include "semphr.h"

#if configNUMBER_OF_CORES > 1 && !defined(configUSE_CORE_AFFINITY)
#error async_context_freertos requires configUSE_CORE_AFFINITY under SMP
#endif
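
// On an SMP build, FreeRTOSConfig.h must therefore enable core affinity, e.g.
// (an illustrative sketch; the core count depends on the target):
//
//   #define configNUMBER_OF_CORES   2
//   #define configUSE_CORE_AFFINITY 1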

static const async_context_type_t template;

static void async_context_freertos_acquire_lock_blocking(async_context_t *self_base);
static void async_context_freertos_release_lock(async_context_t *self_base);
static void async_context_freertos_lock_check(async_context_t *self_base);

// Convert an absolute deadline into a FreeRTOS tick count, clamped and rounded up.
static TickType_t sensible_ticks_until(absolute_time_t until) {
    TickType_t ticks;
    int64_t delay_us = absolute_time_diff_us(get_absolute_time(), until);
    if (delay_us <= 0) {
        ticks = 0;
    } else {
        static const uint32_t max_delay = 60000000;
        uint32_t delay_us_32 = delay_us > max_delay ? max_delay : (uint32_t) delay_us;
        ticks = pdMS_TO_TICKS((delay_us_32 + 999) / 1000);
        // we want to round up: rounding down to zero is wrong (it may produce no delay
        // where a delay is needed), and we also don't want to wake up early only to
        // find there is no work to do yet!
        ticks++;
    }
    return ticks;
}

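// Worked example for the helper above (assuming a 1 ms tick, i.e.
// configTICK_RATE_HZ == 1000): a 2500 us delay yields
// pdMS_TO_TICKS((2500 + 999) / 1000) == 3 ticks, and the final increment makes
// it 4; the extra tick guards against waking just short of the deadline due to
// tick-boundary alignment.
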
// Execute any due work, then re-arm the one-shot timer for the next deadline.
// Must be called with the context lock held.
static void process_under_lock(async_context_freertos_t *self) {
#ifndef NDEBUG
    async_context_freertos_lock_check(&self->core);
#endif
    bool repeat;
    do {
        repeat = false;
        absolute_time_t next_time = async_context_base_execute_once(&self->core);
        TickType_t ticks;
        if (is_at_the_end_of_time(next_time)) {
            ticks = portMAX_DELAY;
        } else {
            ticks = sensible_ticks_until(next_time);
        }
        if (ticks) {
            // the last parameter (the timeout) is also 'ticks', since there is no point
            // waiting to change the period for longer than the period itself!
            repeat = pdFALSE == xTimerChangePeriod(self->timer_handle, ticks, ticks);
        } else {
            repeat = true;
        }
    } while (repeat);
}

// The context's service task: block on a task notification until woken, then
// process all pending work under the lock.
static void async_context_task(void *vself) {
    async_context_freertos_t *self = (async_context_freertos_t *)vself;
    do {
        ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
        if (self->task_should_exit) break;
        async_context_freertos_acquire_lock_blocking(&self->core);
        process_under_lock(self);
        async_context_freertos_release_lock(&self->core);
        __sev(); // it is possible regular code is waiting on a WFE on the other core
    } while (!self->task_should_exit);
    vTaskDelete(NULL);
}

static void async_context_freertos_wake_up(async_context_t *self_base) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    if (self->task_handle) {
        if (portCHECK_IF_IN_ISR()) {
            vTaskNotifyGiveFromISR(self->task_handle, NULL);
            xSemaphoreGiveFromISR(self->work_needed_sem, NULL);
        } else {
            // we don't want to wake ourselves up (we will only ever be called
            // from the async_context_task if we own the lock, in which case processing
            // will already happen when the lock is finally unlocked)
            if (xTaskGetCurrentTaskHandle() != self->task_handle) {
                xTaskNotifyGive(self->task_handle);
                xSemaphoreGive(self->work_needed_sem);
            } else {
#ifndef NDEBUG
                async_context_freertos_lock_check(self_base);
#endif
            }
        }
    }
}

// Timer callback: the timer ID stores the owning context, so recover it and wake the task.
static void timer_handler(TimerHandle_t handle) {
    async_context_freertos_t *self = (async_context_freertos_t *)pvTimerGetTimerID(handle);
    async_context_freertos_wake_up(&self->core);
}

bool async_context_freertos_init(async_context_freertos_t *self, async_context_freertos_config_t *config) {
    memset(self, 0, sizeof(*self));
    self->core.type = &template;
    self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
    self->core.core_num = get_core_num();
    self->lock_mutex = xSemaphoreCreateRecursiveMutex();
    self->work_needed_sem = xSemaphoreCreateBinary();
    self->timer_handle = xTimerCreate("async_context_timer", // just a text name, not used by the kernel
                                      portMAX_DELAY,
                                      pdFALSE, // one-shot: the timer does not auto-reload; process_under_lock re-arms it
                                      self,
                                      timer_handler);

    if (!self->lock_mutex ||
        !self->work_needed_sem ||
        !self->timer_handle ||
        pdPASS != xTaskCreate(async_context_task, "async_context_task", config->task_stack_size, self,
                              config->task_priority, &self->task_handle)) {
        async_context_deinit(&self->core);
        return false;
    }
#if configNUMBER_OF_CORES > 1
    UBaseType_t core_id = config->task_core_id;
    if (core_id == (UBaseType_t)-1) {
        core_id = portGET_CORE_ID();
    }
    // the context must only ever run on a single core
    vTaskCoreAffinitySet(self->task_handle, 1u << core_id);
#endif
    return true;
}

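// Example usage (a minimal sketch, assuming the async_context_freertos_default_config()
// helper from pico/async_context_freertos.h, and that this runs once FreeRTOS is up):
//
//   static async_context_freertos_t context;
//
//   bool start_async_context(void) {
//       async_context_freertos_config_t config = async_context_freertos_default_config();
//       return async_context_freertos_init(&context, &config);
//   }
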
// Runs on the context's own task via execute_sync; setting the flag makes the
// task loop exit as soon as this work completes.
static uint32_t end_task_func(void *param) {
    async_context_freertos_t *self = (async_context_freertos_t *)param;
    self->task_should_exit = true;
    return 0;
}

void async_context_freertos_deinit(async_context_t *self_base) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    if (self->task_handle) {
        async_context_execute_sync(self_base, end_task_func, self_base);
    }
    if (self->timer_handle) {
        xTimerDelete(self->timer_handle, 0);
    }
    if (self->lock_mutex) {
        vSemaphoreDelete(self->lock_mutex);
    }
    if (self->work_needed_sem) {
        vSemaphoreDelete(self->work_needed_sem);
    }
    memset(self, 0, sizeof(*self));
}

void async_context_freertos_acquire_lock_blocking(async_context_t *self_base) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    // take the recursive mutex; 'nesting' tracks the depth so that release
    // knows when the outermost unlock happens
    assert(!portCHECK_IF_IN_ISR());
    xSemaphoreTakeRecursive(self->lock_mutex, portMAX_DELAY);
    self->nesting++;
}

void async_context_freertos_lock_check(__unused async_context_t *self_base) {
#ifndef NDEBUG
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    // assert that the calling task currently holds the lock
    assert(xSemaphoreGetMutexHolder(self->lock_mutex) == xTaskGetCurrentTaskHandle());
#endif
}

typedef struct sync_func_call {
    async_when_pending_worker_t worker; // must be first, so the worker pointer can be cast back to sync_func_call_t
    SemaphoreHandle_t sem;
    uint32_t (*func)(void *param);
    void *param;
    uint32_t rc;
} sync_func_call_t;

// Runs on the context's task: perform the call, then signal the waiting caller.
static void handle_sync_func_call(__unused async_context_t *context, async_when_pending_worker_t *worker) {
    sync_func_call_t *call = (sync_func_call_t *)worker;
    call->rc = call->func(call->param);
    xSemaphoreGive(call->sem);
}

uint32_t async_context_freertos_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    // the caller must not already hold the lock: the function runs on the context's
    // own task, so we would deadlock waiting for it
    hard_assert(xSemaphoreGetMutexHolder(self->lock_mutex) != xTaskGetCurrentTaskHandle());
    sync_func_call_t call;
    call.worker.do_work = handle_sync_func_call;
    call.func = func;
    call.param = param;
    call.sem = xSemaphoreCreateBinary();
    async_context_add_when_pending_worker(self_base, &call.worker);
    async_context_set_work_pending(self_base, &call.worker);
    xSemaphoreTake(call.sem, portMAX_DELAY);
    async_context_remove_when_pending_worker(self_base, &call.worker);
    vSemaphoreDelete(call.sem);
    return call.rc;
}

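// Example usage (a minimal sketch; my_locked_op and my_state_t are hypothetical
// names introduced here for illustration):
//
//   static uint32_t my_locked_op(void *param) {
//       // runs on the context's task with the lock held
//       my_state_t *state = (my_state_t *)param;
//       return state->counter++;
//   }
//
//   uint32_t rc = async_context_execute_sync(&context.core, my_locked_op, &my_state);
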
void async_context_freertos_release_lock(async_context_t *self_base) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    bool do_wakeup = false;
    if (self->nesting == 1) {
        // note that we always process on the outermost lock exit, to facilitate cases
        // like lwIP where we have no notification when lwIP timers are added.
        //
        // this operation must be done from the right task
        if (self->task_handle != xTaskGetCurrentTaskHandle()) {
            // note we defer the wakeup until after we release the lock, otherwise it can be wasteful
            // (waking up the task, but then having it block immediately on us)
            do_wakeup = true;
        } else {
            process_under_lock(self);
        }
    }
    --self->nesting;
    xSemaphoreGiveRecursive(self->lock_mutex);
    if (do_wakeup) {
        async_context_freertos_wake_up(self_base);
    }
}

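// Example usage (a minimal sketch using the generic wrappers from pico/async_context.h):
// the lock is recursive, so paired acquire/release calls nest safely:
//
//   async_context_acquire_lock_blocking(&context.core);
//   // ... touch state shared with the workers ...
//   async_context_release_lock(&context.core);
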
static bool async_context_freertos_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
    async_context_freertos_acquire_lock_blocking(self_base);
    bool rc = async_context_base_add_at_time_worker(self_base, worker);
    async_context_freertos_release_lock(self_base);
    return rc;
}

static bool async_context_freertos_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
    async_context_freertos_acquire_lock_blocking(self_base);
    bool rc = async_context_base_remove_at_time_worker(self_base, worker);
    async_context_freertos_release_lock(self_base);
    return rc;
}

static bool async_context_freertos_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
    async_context_freertos_acquire_lock_blocking(self_base);
    bool rc = async_context_base_add_when_pending_worker(self_base, worker);
    async_context_freertos_release_lock(self_base);
    return rc;
}

static bool async_context_freertos_remove_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
    async_context_freertos_acquire_lock_blocking(self_base);
    bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
    async_context_freertos_release_lock(self_base);
    return rc;
}

static void async_context_freertos_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
    worker->work_pending = true;
    async_context_freertos_wake_up(self_base);
}

static void async_context_freertos_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
    assert(!portCHECK_IF_IN_ISR());
    TickType_t ticks = sensible_ticks_until(until);
    vTaskDelay(ticks);
}

static void async_context_freertos_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
    async_context_freertos_t *self = (async_context_freertos_t *)self_base;
    assert(!portCHECK_IF_IN_ISR());
    while (!time_reached(until)) {
        TickType_t ticks = sensible_ticks_until(until);
        if (!ticks || xSemaphoreTake(self->work_needed_sem, ticks)) return;
    }
}

static const async_context_type_t template = {
    .type = ASYNC_CONTEXT_FREERTOS,
    .acquire_lock_blocking = async_context_freertos_acquire_lock_blocking,
    .release_lock = async_context_freertos_release_lock,
    .lock_check = async_context_freertos_lock_check,
    .execute_sync = async_context_freertos_execute_sync,
    .add_at_time_worker = async_context_freertos_add_at_time_worker,
    .remove_at_time_worker = async_context_freertos_remove_at_time_worker,
    .add_when_pending_worker = async_context_freertos_add_when_pending_worker,
    .remove_when_pending_worker = async_context_freertos_remove_when_pending_worker,
    .set_work_pending = async_context_freertos_set_work_pending,
    .poll = 0,
    .wait_until = async_context_freertos_wait_until,
    .wait_for_work_until = async_context_freertos_wait_for_work_until,
    .deinit = async_context_freertos_deinit,
};