/*
 * Copyright (c) 2022 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>
#include "pico/async_context_threadsafe_background.h"
#include "pico/async_context_base.h"
#include "pico/sync.h"
#include "hardware/irq.h"

static const async_context_type_t template;
static async_context_threadsafe_background_t *async_contexts_by_user_irq[NUM_USER_IRQS];

static void low_priority_irq_handler(void);
static void process_under_lock(async_context_threadsafe_background_t *self);
static int64_t alarm_handler(alarm_id_t id, void *user_data);

#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY PICO_LOWEST_IRQ_PRIORITY
#endif

#ifndef ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS
#define ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS 4
#endif

async_context_threadsafe_background_config_t async_context_threadsafe_background_default_config(void) {
    async_context_threadsafe_background_config_t config = {
            .low_priority_irq_handler_priority = ASYNC_CONTEXT_THREADSAFE_BACKGROUND_DEFAULT_LOW_PRIORITY_IRQ_HANDLER_PRIORITY,
            .custom_alarm_pool = NULL,
    };
    return config;
}
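
// A minimal usage sketch (assumption: application code, not part of this file):
// callers usually start from the default configuration and override fields as needed, e.g.
//
//   async_context_threadsafe_background_config_t cfg =
//       async_context_threadsafe_background_default_config();
//   cfg.low_priority_irq_handler_priority = PICO_LOWEST_IRQ_PRIORITY;  // or a custom priority
//   // cfg.custom_alarm_pool may point to a caller-owned alarm pool instead of the default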

static inline uint recursive_mutex_enter_count(recursive_mutex_t *mutex) {
    return mutex->enter_count;
}

static inline lock_owner_id_t recursive_mutex_owner(recursive_mutex_t *mutex) {
    return mutex->owner;
}

static void async_context_threadsafe_background_wake_up(async_context_t *self_base) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    if (self_base->core_num == get_core_num()) {
        // on same core, can dispatch directly
        irq_set_pending(self->low_priority_irq_num);
    } else {
        // remove the existing alarm (it may have already fired) so we don't overflow the pool with repeats
        //
        // note that force_alarm_id is not protected here, however if we miss removing one, they will fire
        // almost immediately anyway (since they were set in the past)
        alarm_id_t force_alarm_id = self->force_alarm_id;
        if (force_alarm_id > 0) {
            alarm_pool_cancel_alarm(self->alarm_pool, force_alarm_id);
        }
        // we cause an early timeout (0 is always in the past) on the alarm_pool core
        // note that by the time this returns, the timer may already have fired, so we
        // may end up setting self->force_alarm_id to a stale timer id, but that is fine as we
        // will harmlessly cancel it again next time
        self->force_alarm_id = alarm_pool_add_alarm_at_force_in_context(self->alarm_pool, from_us_since_boot(0),
                                                                        alarm_handler, self);
    }
#else
    // on same core, can dispatch directly
    irq_set_pending(self->low_priority_irq_num);
#endif
    sem_release(&self->work_needed_sem);
}

// Prevent background processing in the low-priority IRQ and access by the other core
// These methods are called in low-priority IRQ context and on either core
// They can be called recursively
static inline void lock_acquire(async_context_threadsafe_background_t *self) {
    // Lock the other core and stop low_prio_irq running
    recursive_mutex_enter_blocking(&self->lock_mutex);
}
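
// A minimal usage sketch (assumption: application code, not part of this file):
// user code normally locks through the generic async_context API, which routes to
// lock_acquire()/lock_release() via the function table at the bottom of this file, e.g.
//
//   async_context_acquire_lock_blocking(&bg_context.core);
//   // ... touch state shared with workers ...
//   async_context_release_lock(&bg_context.core);
//
// where bg_context is a hypothetical async_context_threadsafe_background_t instance.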

static void async_context_threadsafe_background_lock_check(async_context_t *self_base) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    // Lock the other core and stop low_prio_irq running
    if (recursive_mutex_enter_count(&self->lock_mutex) < 1 || recursive_mutex_owner(&self->lock_mutex) != lock_get_caller_owner_id()) {
        panic_compact("async_context lock_check failed");
    }
}

#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
typedef struct sync_func_call {
    async_when_pending_worker_t worker;
    semaphore_t sem;
    uint32_t (*func)(void *param);
    void *param;
    uint32_t rc;
} sync_func_call_t;

static void handle_sync_func_call(async_context_t *context, async_when_pending_worker_t *worker) {
    sync_func_call_t *call = (sync_func_call_t *)worker;
    call->rc = call->func(call->param);
    sem_release(&call->sem);
    async_context_remove_when_pending_worker(context, worker);
}
#endif

static void lock_release(async_context_threadsafe_background_t *self) {
    bool outermost = 1 == recursive_mutex_enter_count(&self->lock_mutex);
    // note that it is *not* a requirement to have low_prio_irq_missed handled on the
    // same core and in the low-priority irq, as we are only *logically* a single thread. The user
    // is already free to call from either core, and this would only happen on a different
    // core if the user *themselves* is acquiring the lock from other cores anyway

#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    bool wake_other_core = false;
#endif
    if (outermost) {
        // note that we always do processing on outermost lock exit, to facilitate cases
        // like lwIP where we have no notification when lwIP timers are added.
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
        if (self->core.core_num == get_core_num()) {
            process_under_lock(self);
        } else if (async_context_base_needs_servicing(&self->core)) {
            // have to wake up the other core
            wake_other_core = true;
        }
#else
        process_under_lock(self);
#endif
    }
    recursive_mutex_exit(&self->lock_mutex);
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    if (wake_other_core) {
        async_context_threadsafe_background_wake_up(&self->core);
    }
#endif
}

uint32_t async_context_threadsafe_background_execute_sync(async_context_t *self_base, uint32_t (*func)(void *param), void *param) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    if (self_base->core_num != get_core_num()) {
        hard_assert(!recursive_mutex_enter_count(&self->lock_mutex));
        sync_func_call_t call;
        call.worker.do_work = handle_sync_func_call;
        call.func = func;
        call.param = param;
        sem_init(&call.sem, 0, 1);
        async_context_add_when_pending_worker(self_base, &call.worker);
        async_context_set_work_pending(self_base, &call.worker);
        sem_acquire_blocking(&call.sem);
        return call.rc;
    }
#endif
    // short-circuit if we are on the right core
    lock_acquire(self);
    uint32_t rc = func(param);
    lock_release(self);
    return rc;
}
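
// A minimal usage sketch (assumption: application code, not part of this file):
// execute_sync runs a function "inside" the context even when called from the other
// core, blocking until it completes and returning its result, e.g.
//
//   static uint32_t read_counter_locked(void *param) {
//       // hypothetical shared state owned by the async_context's workers
//       return *(volatile uint32_t *)param;
//   }
//   ...
//   uint32_t value = async_context_execute_sync(&bg_context.core, read_counter_locked, &counter);
//
// read_counter_locked, bg_context and counter are illustrative names only.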

static bool low_prio_irq_init(async_context_threadsafe_background_t *self, uint8_t priority) {
    assert(get_core_num() == self->core.core_num);
    int irq = user_irq_claim_unused(false);
    if (irq < 0) return false;
    self->low_priority_irq_num = (uint8_t) irq;
    uint index = irq - FIRST_USER_IRQ;
    assert(index < count_of(async_contexts_by_user_irq));
    async_contexts_by_user_irq[index] = self;
    irq_set_exclusive_handler(self->low_priority_irq_num, low_priority_irq_handler);
    irq_set_enabled(self->low_priority_irq_num, true);
    irq_set_priority(self->low_priority_irq_num, priority);
    return true;
}

static void low_prio_irq_deinit(async_context_threadsafe_background_t *self) {
    if (self->low_priority_irq_num > 0) {
        assert(get_core_num() == self->core.core_num);
        irq_set_enabled(self->low_priority_irq_num, false);
        irq_remove_handler(self->low_priority_irq_num, low_priority_irq_handler);
        user_irq_unclaim(self->low_priority_irq_num);
        self->low_priority_irq_num = 0;
    }
}

static int64_t alarm_handler(__unused alarm_id_t id, void *user_data) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)user_data;
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    self->force_alarm_id = 0;
#endif
    self->alarm_pending = false;
    async_context_threadsafe_background_wake_up(&self->core);
    return 0;
}

bool async_context_threadsafe_background_init(async_context_threadsafe_background_t *self, async_context_threadsafe_background_config_t *config) {
    memset(self, 0, sizeof(*self));
    self->core.type = &template;
    self->core.flags = ASYNC_CONTEXT_FLAG_CALLBACK_FROM_IRQ | ASYNC_CONTEXT_FLAG_CALLBACK_FROM_NON_IRQ;
    self->core.core_num = get_core_num();
    if (config->custom_alarm_pool) {
        self->alarm_pool = config->custom_alarm_pool;
    } else {
#if PICO_TIME_DEFAULT_ALARM_POOL_DISABLED
        self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
        self->alarm_pool_owned = true;
#else
        self->alarm_pool = alarm_pool_get_default();
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
        if (self->core.core_num != alarm_pool_core_num(self->alarm_pool)) {
            self->alarm_pool = alarm_pool_create_with_unused_hardware_alarm(ASYNC_CONTEXT_THREADSAFE_BACKGROUND_ALARM_POOL_MAX_ALARMS);
            self->alarm_pool_owned = true;
        }
#endif
#endif
    }
    assert(self->core.core_num == alarm_pool_core_num(self->alarm_pool));
    sem_init(&self->work_needed_sem, 1, 1);
    recursive_mutex_init(&self->lock_mutex);
    bool ok = low_prio_irq_init(self, config->low_priority_irq_handler_priority);
    return ok;
}
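
// A minimal usage sketch (assumption: application code, not part of this file):
// higher-level libraries (e.g. pico_cyw43_arch) create one of these contexts roughly like
//
//   static async_context_threadsafe_background_t bg_context;
//   async_context_threadsafe_background_config_t cfg =
//       async_context_threadsafe_background_default_config();
//   if (!async_context_threadsafe_background_init(&bg_context, &cfg)) {
//       // init can fail, e.g. if no user IRQ is free on this core
//   }
//
// bg_context is an illustrative name; workers are then registered against &bg_context.core.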

static void async_context_threadsafe_background_set_work_pending(async_context_t *self_base, async_when_pending_worker_t *worker) {
    worker->work_pending = true;
    async_context_threadsafe_background_wake_up(self_base);
}
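
// A minimal usage sketch (assumption: application code, not part of this file):
// a "when pending" worker is typically flagged from a hardware IRQ and then serviced
// in this context's low-priority IRQ, e.g.
//
//   static void my_do_work(async_context_t *context, async_when_pending_worker_t *worker) {
//       // drain whatever the hardware IRQ left behind
//   }
//   static async_when_pending_worker_t my_worker = { .do_work = my_do_work };
//   ...
//   async_context_add_when_pending_worker(&bg_context.core, &my_worker);
//   // from the hardware IRQ handler:
//   async_context_set_work_pending(&bg_context.core, &my_worker);
//
// my_do_work, my_worker and bg_context are illustrative names only.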

static void async_context_threadsafe_background_deinit(async_context_t *self_base) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    // todo we do not currently handle this correctly; we could, but it seems like a rare case
    assert(get_core_num() == self_base->core_num);
    low_prio_irq_deinit(self);
    if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
#if ASYNC_CONTEXT_THREADSAFE_BACKGROUND_MULTI_CORE
    if (self->alarm_pool_owned) {
        alarm_pool_destroy(self->alarm_pool);
    }
#endif
    // acquire the lock to make sure the callback is not running (we have already disabled the IRQ)
    recursive_mutex_enter_blocking(&self->lock_mutex);
    recursive_mutex_exit(&self->lock_mutex);
    memset(self, 0, sizeof(*self));
}

static void process_under_lock(async_context_threadsafe_background_t *self) {
#ifndef NDEBUG
    async_context_threadsafe_background_lock_check(&self->core);
    assert(self->core.core_num == get_core_num());
#endif
    do {
        absolute_time_t next_time = async_context_base_execute_once(&self->core);
        // if the next wakeup time is in the past then loop
        if (absolute_time_diff_us(get_absolute_time(), next_time) <= 0) continue;
        // if there is no next wakeup time, we're done
        if (is_at_the_end_of_time(next_time)) {
            // cancel the alarm early (we will have been called soon after an alarm wakeup), so that
            // we don't risk alarm_id collision.
            if (self->alarm_id > 0) {
                alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
                self->alarm_id = 0;
            }
            break;
        }
        // the following is an optimization; we are often called much more frequently than timeouts actually change,
        // and removing and re-adding the timers has some non-trivial overhead (tens of microseconds), so we choose
        // to allow the existing timeout to run to completion, and then re-assess from there, unless the new wakeup
        // time is before the last one set.
        //
        // note that alarm_pending is not protected; however, if it is wrong, it is wrong in the sense that it is
        // false when it should be true, so if it is wrong due to a race, we will cancel and re-add the alarm, which is safe.
        if (self->alarm_pending && absolute_time_diff_us(self->last_set_alarm_time, next_time) > 0) break;
        // cancel the existing alarm (it may no longer exist)
        if (self->alarm_id > 0) alarm_pool_cancel_alarm(self->alarm_pool, self->alarm_id);
        self->last_set_alarm_time = next_time;
        self->alarm_pending = true;
        self->alarm_id = alarm_pool_add_alarm_at(self->alarm_pool, next_time, alarm_handler, self, false);
        if (self->alarm_id > 0) break;
        self->alarm_pending = false;
    } while (true);
}

// Low priority interrupt handler to perform background processing
static void low_priority_irq_handler(void) {
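    // recover which context this IRQ belongs to: __get_current_exception() gives the current
    // exception number, subtracting VTABLE_FIRST_IRQ yields the IRQ number, and subtracting
    // FIRST_USER_IRQ maps that to the index used in async_contexts_by_user_irq[]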
    uint index = __get_current_exception() - VTABLE_FIRST_IRQ - FIRST_USER_IRQ;
    assert(index < count_of(async_contexts_by_user_irq));
    async_context_threadsafe_background_t *self = async_contexts_by_user_irq[index];
    if (!self) return;
    assert(self->core.core_num == get_core_num());
    if (recursive_mutex_try_enter(&self->lock_mutex, NULL)) {
        // if the recurse count is not 1 then we have pre-empted something which held the lock on the same core,
        // so we cannot do processing here (however processing will be done when that lock is released)
        if (recursive_mutex_enter_count(&self->lock_mutex) == 1) {
            process_under_lock(self);
        }
        recursive_mutex_exit(&self->lock_mutex);
    }
}

static void async_context_threadsafe_background_wait_until(__unused async_context_t *self_base, absolute_time_t until) {
    // can be called in IRQs, in which case we just have to wait
    if (__get_current_exception()) {
        busy_wait_until(until);
    } else {
        sleep_until(until);
    }
}

static void async_context_threadsafe_background_wait_for_work_until(async_context_t *self_base, absolute_time_t until) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    sem_acquire_block_until(&self->work_needed_sem, until);
}

static bool async_context_threadsafe_background_add_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    lock_acquire(self);
    bool rc = async_context_base_add_at_time_worker(self_base, worker);
    lock_release(self);
    return rc;
}
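
// A minimal usage sketch (assumption: application code, not part of this file):
// an "at time" worker is registered with a deadline and its do_work callback runs in the
// low-priority IRQ once that deadline passes, e.g.
//
//   static void my_timeout(async_context_t *context, async_at_time_worker_t *worker) {
//       // periodic housekeeping; re-schedule if it should repeat
//       async_context_add_at_time_worker_in_ms(context, worker, 1000);
//   }
//   static async_at_time_worker_t my_timeout_worker = { .do_work = my_timeout };
//   ...
//   async_context_add_at_time_worker_in_ms(&bg_context.core, &my_timeout_worker, 1000);
//
// my_timeout, my_timeout_worker and bg_context are illustrative names only.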

static bool async_context_threadsafe_background_remove_at_time_worker(async_context_t *self_base, async_at_time_worker_t *worker) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    lock_acquire(self);
    bool rc = async_context_base_remove_at_time_worker(self_base, worker);
    lock_release(self);
    return rc;
}

static bool async_context_threadsafe_background_add_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    lock_acquire(self);
    bool rc = async_context_base_add_when_pending_worker(self_base, worker);
    lock_release(self);
    return rc;
}

static bool async_context_threadsafe_background_when_pending_worker(async_context_t *self_base, async_when_pending_worker_t *worker) {
    async_context_threadsafe_background_t *self = (async_context_threadsafe_background_t *)self_base;
    lock_acquire(self);
    bool rc = async_context_base_remove_when_pending_worker(self_base, worker);
    lock_release(self);
    return rc;
}

static void async_context_threadsafe_background_acquire_lock_blocking(async_context_t *self_base) {
    lock_acquire((async_context_threadsafe_background_t *) self_base);
}

static void async_context_threadsafe_background_release_lock(async_context_t *self_base) {
    lock_release((async_context_threadsafe_background_t *)self_base);
}

static const async_context_type_t template = {
    .type = ASYNC_CONTEXT_THREADSAFE_BACKGROUND,
    .acquire_lock_blocking = async_context_threadsafe_background_acquire_lock_blocking,
    .release_lock = async_context_threadsafe_background_release_lock,
    .lock_check = async_context_threadsafe_background_lock_check,
    .execute_sync = async_context_threadsafe_background_execute_sync,
    .add_at_time_worker = async_context_threadsafe_background_add_at_time_worker,
    .remove_at_time_worker = async_context_threadsafe_background_remove_at_time_worker,
    .add_when_pending_worker = async_context_threadsafe_background_add_when_pending_worker,
    .remove_when_pending_worker = async_context_threadsafe_background_when_pending_worker,
    .set_work_pending = async_context_threadsafe_background_set_work_pending,
    .poll = 0,
    .wait_until = async_context_threadsafe_background_wait_until,
    .wait_for_work_until = async_context_threadsafe_background_wait_for_work_until,
    .deinit = async_context_threadsafe_background_deinit,
};