1 /*
2 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include "hardware/irq.h"
8 #include "pico/runtime_init.h"
9
10 #include "hardware/claim.h"
11 #include "pico/mutex.h"
12 #include "pico/assert.h"
13
14 #if defined(PICO_RUNTIME_INIT_PER_CORE_IRQ_PRIORITIES) && !PICO_RUNTIME_SKIP_INIT_PER_CORE_IRQ_PRIORITIES
15 PICO_RUNTIME_INIT_FUNC_PER_CORE(runtime_init_per_core_irq_priorities, PICO_RUNTIME_INIT_PER_CORE_IRQ_PRIORITIES);
16 #endif
17
18 #if PICO_VTABLE_PER_CORE
19 static uint8_t user_irq_claimed[NUM_CORES];
user_irq_claimed_ptr(void)20 static inline uint8_t *user_irq_claimed_ptr(void) {
21 return &user_irq_claimed[get_core_num()];
22 }
23 #else
24 static uint8_t user_irq_claimed;
user_irq_claimed_ptr(void)25 static inline uint8_t *user_irq_claimed_ptr(void) {
26 return &user_irq_claimed;
27 }
28 #endif
29
get_vtable(void)30 static inline irq_handler_t *get_vtable(void) {
31 #ifdef __riscv
32 return (irq_handler_t *) (riscv_read_csr(RVCSR_MTVEC_OFFSET) & ~0x3u);
33 #else
34 return (irq_handler_t *) scb_hw->vtor;
35 #endif
36 }
37
add_thumb_bit(void * addr)38 static inline void *add_thumb_bit(void *addr) {
39 #ifdef __riscv
40 return addr;
41 #else
42 return (void *) (((uintptr_t) addr) | 0x1);
43 #endif
44 }
45
remove_thumb_bit(void * addr)46 static inline void *remove_thumb_bit(void *addr) {
47 #ifdef __riscv
48 return addr;
49 #else
50 return (void *) (((uintptr_t) addr) & (uint)~0x1);
51 #endif
52 }
53
set_raw_irq_handler_and_unlock(uint num,irq_handler_t handler,uint32_t save)54 static void set_raw_irq_handler_and_unlock(uint num, irq_handler_t handler, uint32_t save) {
55 // update vtable (vtable_handler may be same or updated depending on cases, but we do it anyway for compactness)
56 get_vtable()[VTABLE_FIRST_IRQ + num] = handler;
57 __dmb();
58 spin_unlock(spin_lock_instance(PICO_SPINLOCK_ID_IRQ), save);
59 }
60
irq_set_enabled(uint num,bool enabled)61 void irq_set_enabled(uint num, bool enabled) {
62 check_irq_param(num);
63 // really should update irq_set_mask_enabled?
64 irq_set_mask_n_enabled(num / 32, 1u << (num % 32), enabled);
65 }
66
pico_irq_is_enabled(uint num)67 bool pico_irq_is_enabled(uint num) {
68 check_irq_param(num);
69 #if PICO_RP2040
70 return 0 != ((1u << num) & *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ISER_OFFSET)));
71 #elif defined(__riscv)
72 return 0 != (hazard3_irqarray_read(RVCSR_MEIEA_OFFSET, num / 16) & (1u << (num % 16)));
73 #else
74 return 0 != (nvic_hw->iser[num/32] & (1 << num % 32));
75 #endif
76 }
77
irq_set_mask_n_enabled_internal(uint n,uint32_t mask,bool enabled)78 static inline void irq_set_mask_n_enabled_internal(uint n, uint32_t mask, bool enabled) {
79 invalid_params_if(HARDWARE_IRQ, n * 32u >= ((NUM_IRQS + 31u) & ~31u));
80 #if defined(__riscv)
81 if (enabled) {
82 hazard3_irqarray_clear(RVCSR_MEIFA_OFFSET, 2 * n, mask & 0xffffu);
83 hazard3_irqarray_clear(RVCSR_MEIFA_OFFSET, 2 * n + 1, mask >> 16);
84 hazard3_irqarray_set(RVCSR_MEIEA_OFFSET, 2 * n, mask & 0xffffu);
85 hazard3_irqarray_set(RVCSR_MEIEA_OFFSET, 2 * n + 1, mask >> 16);
86 } else {
87 hazard3_irqarray_clear(RVCSR_MEIEA_OFFSET, 2 * n, mask & 0xffffu);
88 hazard3_irqarray_clear(RVCSR_MEIEA_OFFSET, 2 * n + 1, mask >> 16);
89 }
90 #elif PICO_RP2040
91 ((void)n);
92 if (enabled) {
93 nvic_hw->icpr = mask;
94 nvic_hw->iser = mask;
95 } else {
96 nvic_hw->icer = mask;
97 }
98 #else
99 // >32 IRQs (well this works for the bottom 32 which is all that is passed in
100 if (enabled) {
101 nvic_hw->icpr[n] = mask;
102 nvic_hw->iser[n] = mask;
103 } else {
104 nvic_hw->icer[n] = mask;
105 }
106 #endif
107 }
108
irq_set_mask_enabled(uint32_t mask,bool enabled)109 void irq_set_mask_enabled(uint32_t mask, bool enabled) {
110 irq_set_mask_n_enabled_internal(0, mask, enabled);
111 }
112
irq_set_mask_n_enabled(uint n,uint32_t mask,bool enabled)113 void irq_set_mask_n_enabled(uint n, uint32_t mask, bool enabled) {
114 irq_set_mask_n_enabled_internal(n, mask, enabled);
115 }
116
irq_set_pending(uint num)117 void irq_set_pending(uint num) {
118 check_irq_param(num);
119 #ifdef __riscv
120 // Interrupt force is subsequently cleared by any read of meinext that
121 // indicates the forced IRQ (can also be cleared manually)
122 hazard3_irqarray_set(RVCSR_MEIFA_OFFSET, num / 16, 1u << (num % 16));
123 #else
124 #if PICO_RP2040
125 *((io_rw_32 *) (PPB_BASE + M0PLUS_NVIC_ISPR_OFFSET)) = 1u << num;
126 #else
127 nvic_hw->ispr[num/32] = 1 << (num % 32);
128 #endif
129 #endif
130 }
131
132 #if !PICO_DISABLE_SHARED_IRQ_HANDLERS
133 // limited by 8 bit relative links (and reality)
134 static_assert(PICO_MAX_SHARED_IRQ_HANDLERS >= 1 && PICO_MAX_SHARED_IRQ_HANDLERS < 0x7f, "");
135
136 // note these are not real functions, they are code fragments (i.e. don't call them)
137 extern void irq_handler_chain_first_slot(void);
138 extern void irq_handler_chain_remove_tail(void);
139
140 // On Arm:
141 //
142 // - The first slot begins with a tail call to irq_handler_chain_first_slot, passing a pointer to
143 // the slot in r1; this pushes the initial link register to the stack, invokes the slot's handler
144 // and then returns into the latter half of the slot
145 //
146 // - Non-first slots begin with a call of the slot's handler
147 //
148 // - Non-last slots end with a tail into the next slot in the chain
149 //
150 // - The last slot ends with a pop of the return address pushed by irq_handler_chain_first_slot
151 //
152 // On RISC-V:
153 //
154 // - The first slot begins with jal t0, irq_handler_first_chain_slot followed by a 32-bit pointer to
155 // the handler; this pushes the ultimate return address to the stack, invokes the slot's handler
156 // and then returns into the latter half of the slot
157 //
158 // - Non-first slots begin with an lui; jalr sequence to call the handler
159 //
160 // - Non-last slots end with a tail into the next slot in the chain
161 //
162 // - The last slot ends with a pop of the return address pushed by irq_handler_chain_first_slot
163 //
164 // This means the layout is different between Arm and RISC-V (though total size is the same): Arm is
165 // 6 bytes of code, 2 bytes of link and 4 bytes of pointer. RISC-V is 10 bytes of code and 2 bytes
166 // of link.
167
168 extern struct irq_handler_chain_slot {
169 #ifndef __riscv
170 uint16_t inst1;
171 uint16_t inst2;
172 #else
173 uint32_t inst1;
174 uint32_t inst2;
175 #endif
176 uint16_t inst3;
177 union {
178 // On Arm, when a handler is removed while executing, it needs a 32-bit instruction at
179 // inst3, which overwrites the link and the priority; this is ok because no one else is
180 // modifying the chain, as the chain is effectively core-local, and the user code which
181 // might still need this link disables the IRQ in question before updating, which means we
182 // aren't executing!
183 struct {
184 int8_t link;
185 uint8_t priority;
186 };
187 uint16_t inst4;
188 };
189 #ifndef __riscv
190 irq_handler_t handler;
191 #endif
192 } irq_handler_chain_slots[PICO_MAX_SHARED_IRQ_HANDLERS];
193
194 static int8_t irq_handler_chain_free_slot_head;
195
is_shared_irq_raw_handler(irq_handler_t raw_handler)196 static inline bool is_shared_irq_raw_handler(irq_handler_t raw_handler) {
197 return (uintptr_t)raw_handler - (uintptr_t)irq_handler_chain_slots < sizeof(irq_handler_chain_slots);
198 }
199
irq_has_shared_handler(uint irq_num)200 bool irq_has_shared_handler(uint irq_num) {
201 check_irq_param(irq_num);
202 irq_handler_t handler = irq_get_vtable_handler(irq_num);
203 return handler && is_shared_irq_raw_handler(handler);
204 }
205
206 #else // PICO_DISABLE_SHARED_IRQ_HANDLERS
207 #define is_shared_irq_raw_handler(h) false
irq_has_shared_handler(uint irq_num)208 bool irq_has_shared_handler(uint irq_num) {
209 return false;
210 }
211 #endif
212
213
irq_get_vtable_handler(uint num)214 irq_handler_t irq_get_vtable_handler(uint num) {
215 check_irq_param(num);
216 return get_vtable()[VTABLE_FIRST_IRQ + num];
217 }
218
irq_set_exclusive_handler(uint num,irq_handler_t handler)219 void irq_set_exclusive_handler(uint num, irq_handler_t handler) {
220 check_irq_param(num);
221 #if !PICO_NO_RAM_VECTOR_TABLE
222 spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
223 uint32_t save = spin_lock_blocking(lock);
224 __unused irq_handler_t current = irq_get_vtable_handler(num);
225 hard_assert(current == __unhandled_user_irq || current == handler);
226 set_raw_irq_handler_and_unlock(num, handler, save);
227 #else
228 panic_unsupported();
229 #endif
230 }
231
irq_get_exclusive_handler(uint num)232 irq_handler_t irq_get_exclusive_handler(uint num) {
233 check_irq_param(num);
234 #if !PICO_NO_RAM_VECTOR_TABLE
235 spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
236 uint32_t save = spin_lock_blocking(lock);
237 irq_handler_t current = irq_get_vtable_handler(num);
238 spin_unlock(lock, save);
239 if (current == __unhandled_user_irq || is_shared_irq_raw_handler(current)) {
240 return NULL;
241 }
242 return current;
243 #else
244 panic_unsupported();
245 #endif
246 }
247
248
249 #if !PICO_DISABLE_SHARED_IRQ_HANDLERS
250
251 #ifndef __riscv
252
make_j_16(uint16_t * from,void * to)253 static uint16_t make_j_16(uint16_t *from, void *to) {
254 uint32_t ui_from = (uint32_t)from;
255 uint32_t ui_to = (uint32_t)to;
256 int32_t delta = (int32_t)(ui_to - ui_from - 4);
257 assert(delta >= -2048 && delta <= 2046 && !(delta & 1));
258 return (uint16_t)(0xe000 | ((delta >> 1) & 0x7ff));
259 }
260
insert_bl_32(uint16_t * from,void * to)261 static void insert_bl_32(uint16_t *from, void *to) {
262 uint32_t ui_from = (uint32_t)from;
263 uint32_t ui_to = (uint32_t)to;
264 uint32_t delta = (ui_to - ui_from - 4) / 2;
265 assert(!(delta >> 11u));
266 from[0] = (uint16_t)(0xf000 | ((delta >> 11u) & 0x7ffu));
267 from[1] = (uint16_t)(0xf800 | (delta & 0x7ffu));
268 }
269
resolve_j_16(uint16_t * inst)270 static inline void *resolve_j_16(uint16_t *inst) {
271 assert(0x1c == (*inst)>>11u);
272 int32_t i_addr = (*inst) << 21u;
273 i_addr /= (int32_t)(1u<<21u);
274 return inst + 2 + i_addr;
275 }
276
277 #else
278
make_jal_16(uint16_t * from,void * to)279 static uint16_t make_jal_16(uint16_t *from, void *to) {
280 uint32_t ui_from = (uint32_t)from;
281 uint32_t ui_to = (uint32_t)to;
282 int32_t delta = (int32_t)(ui_to - ui_from);
283 assert(delta >= -2048 && delta <= 2046 && !(delta & 1));
284 return 0x2001u | riscv_encode_imm_cj((uint32_t)delta);
285 }
286
make_j_16(uint16_t * from,void * to)287 static uint16_t make_j_16(uint16_t *from, void *to) {
288 return 0x8000u | make_jal_16(from, to);
289 }
290
make_call_inst1(void * to)291 static inline uint32_t make_call_inst1(void *to) {
292 // lui ra, %hi(to)
293 return 0x000000b7u | riscv_encode_imm_u_hi((uintptr_t)to);
294 }
295
make_call_inst2(void * to)296 static inline uint32_t make_call_inst2(void *to) {
297 // jalr ra, %lo(to)(ra)
298 return 0x000080e7u | riscv_encode_imm_i((uintptr_t)to);
299 }
300
make_jal_t0_32(uint32_t * from,void * to)301 static inline uint32_t make_jal_t0_32(uint32_t *from, void *to) {
302 // jal t0, to
303 return 0x000002efu | riscv_encode_imm_j((uintptr_t)to - (uintptr_t)from);
304 }
305
resolve_j_16(uint16_t * inst)306 static void *resolve_j_16(uint16_t *inst) {
307 uint32_t inst32 = (uint32_t)*inst;
308 uint32_t udiff =
309 ((inst32 & 0x0038) >> 2) +
310 ((inst32 & 0x0800) >> 7) +
311 ((inst32 & 0x0004) << 3) +
312 ((inst32 & 0x0080) >> 1) +
313 ((inst32 & 0x0040) << 1) +
314 ((inst32 & 0x0600) >> 1) +
315 ((inst32 & 0x0100) << 2) -
316 ((inst32 & 0x2000) >> 2);
317 return (void *)((uint32_t)inst + udiff);
318 }
319
320 #endif
321
322 // GCC produces horrible code for subtraction of pointers here, and it was bugging me
slot_diff(struct irq_handler_chain_slot * to,struct irq_handler_chain_slot * from)323 static inline int8_t slot_diff(struct irq_handler_chain_slot *to, struct irq_handler_chain_slot *from) {
324 static_assert(sizeof(struct irq_handler_chain_slot) == 12, "");
325 #ifdef __riscv
326 // todo I think RISC-V also deserves a fancy pointer diff implementation
327 return (int8_t)(to - from);
328 #else
329 int32_t result = 0xaaaa;
330 // return (to - from);
331 // note this implementation has limited range, but is fine for plenty more than -128->127 result
332 pico_default_asm (
333 "subs %1, %2\n"
334 "adcs %1, %1\n" // * 2 (and + 1 if negative for rounding)
335 "muls %0, %1\n"
336 "lsrs %0, %0, #20\n"
337 : "+l" (result), "+l" (to)
338 : "l" (from)
339 : "cc"
340 );
341 return (int8_t)result;
342 #endif
343 }
344
345 #ifndef __riscv
346 static const uint16_t inst16_return_from_last_slot = 0xbd01; // pop {r0, pc}
347 #else
348 static const uint16_t inst16_return_from_last_slot = 0xbe42; // cm.popret {ra}, 16
349 #endif
350
get_slot_index(struct irq_handler_chain_slot * slot)351 static inline int8_t get_slot_index(struct irq_handler_chain_slot *slot) {
352 return slot_diff(slot, irq_handler_chain_slots);
353 }
354 #endif
355
irq_add_shared_handler(uint num,irq_handler_t handler,uint8_t order_priority)356 void irq_add_shared_handler(uint num, irq_handler_t handler, uint8_t order_priority) {
357 check_irq_param(num);
358 #if PICO_NO_RAM_VECTOR_TABLE
359 panic_unsupported();
360 #elif PICO_DISABLE_SHARED_IRQ_HANDLERS
361 irq_set_exclusive_handler(num, handler);
362 #else
363 spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
364 uint32_t save = spin_lock_blocking(lock);
365 hard_assert(irq_handler_chain_free_slot_head >= 0); // we must have a slot
366 struct irq_handler_chain_slot *slot = &irq_handler_chain_slots[irq_handler_chain_free_slot_head];
367 int8_t slot_index = irq_handler_chain_free_slot_head;
368 irq_handler_chain_free_slot_head = slot->link;
369 irq_handler_t vtable_handler = get_vtable()[VTABLE_FIRST_IRQ + num];
370 if (!is_shared_irq_raw_handler(vtable_handler)) {
371 // start new chain
372 hard_assert(vtable_handler == __unhandled_user_irq);
373 struct irq_handler_chain_slot slot_data = {
374 #ifndef __riscv
375 .inst1 = 0xa100, // add r1, pc, #0
376 .inst2 = make_j_16(&slot->inst2, (void *) irq_handler_chain_first_slot), // b irq_handler_chain_first_slot
377 .handler = handler,
378 #else
379 .inst1 = make_jal_t0_32(&slot->inst1, irq_handler_chain_first_slot), // jal t0, irq_handler_chain_first_slot
380 .inst2 = (uint32_t)handler, // (t0 points to handler)
381 #endif
382 .inst3 = inst16_return_from_last_slot,
383 .link = -1,
384 .priority = order_priority
385 };
386 *slot = slot_data;
387 vtable_handler = (irq_handler_t)add_thumb_bit(slot);
388 } else {
389 assert(!((((uintptr_t)remove_thumb_bit(vtable_handler)) - ((uintptr_t)irq_handler_chain_slots)) % sizeof(struct irq_handler_chain_slot)));
390 struct irq_handler_chain_slot *prev_slot = NULL;
391 struct irq_handler_chain_slot *existing_vtable_slot = remove_thumb_bit((void *) vtable_handler);
392 struct irq_handler_chain_slot *cur_slot = existing_vtable_slot;
393 while (cur_slot->priority > order_priority) {
394 prev_slot = cur_slot;
395 if (cur_slot->link < 0) break;
396 cur_slot = &irq_handler_chain_slots[cur_slot->link];
397 }
398 if (prev_slot) {
399 // insert into chain
400 struct irq_handler_chain_slot slot_data = {
401 #ifndef __riscv
402 .inst1 = 0x4801, // ldr r0, [pc, #4]
403 .inst2 = 0x4780, // blx r0
404 .handler = handler,
405 #else
406 .inst1 = make_call_inst1(handler), // lui ra, %hi(handler)
407 .inst2 = make_call_inst2(handler), // jalr ra, %lo(handler)(ra)
408 #endif
409 .inst3 = prev_slot->link >= 0 ?
410 make_j_16(&slot->inst3, resolve_j_16(&prev_slot->inst3)) : // b next_slot
411 inst16_return_from_last_slot,
412 .link = prev_slot->link,
413 .priority = order_priority
414 };
415 // update code and data links
416 prev_slot->inst3 = make_j_16(&prev_slot->inst3, slot),
417 prev_slot->link = slot_index;
418 *slot = slot_data;
419 } else {
420 // update with new chain head
421 struct irq_handler_chain_slot slot_data = {
422 #ifndef __riscv
423 .inst1 = 0xa100, // add r1, pc, #0
424 .inst2 = make_j_16(&slot->inst2, (void *) irq_handler_chain_first_slot), // b irq_handler_chain_first_slot
425 .handler = handler,
426 #else
427 .inst1 = make_jal_t0_32(&slot->inst1, irq_handler_chain_first_slot), // jal t0, irq_handler_chain_first_slot
428 .inst2 = (uint32_t)handler, // (t0 points to handler)
429 #endif
430 .inst3 = make_j_16(&slot->inst3, existing_vtable_slot), // b existing_slot
431 .link = get_slot_index(existing_vtable_slot),
432 .priority = order_priority,
433 };
434 *slot = slot_data;
435 // fixup previous head slot
436 #ifndef __riscv
437 existing_vtable_slot->inst1 = 0x4801; // ldr r0, [pc, #4]
438 existing_vtable_slot->inst2 = 0x4780; // blx r0
439 #else
440 // todo lock-freeness?
441 void *handler_of_existing_head = (void*)existing_vtable_slot->inst2;
442 existing_vtable_slot->inst1 = make_call_inst1(handler_of_existing_head);
443 existing_vtable_slot->inst2 = make_call_inst2(handler_of_existing_head);
444 #endif
445 vtable_handler = (irq_handler_t)add_thumb_bit(slot);
446 }
447 }
448 set_raw_irq_handler_and_unlock(num, vtable_handler, save);
449 #endif // !PICO_NO_RAM_VECTOR_TABLE && !PICO_DISABLE_SHARED_IRQ_HANDLERS
450 }
451
452 #if !PICO_DISABLE_SHARED_IRQ_HANDLERS
handler_from_slot(struct irq_handler_chain_slot * slot)453 static inline irq_handler_t handler_from_slot(struct irq_handler_chain_slot *slot) {
454 #ifndef __riscv
455 return slot->handler;
456 #else
457 if (slot->inst1 & 0x8u) {
458 // jal t0, irq_handler_chain_first_slot; .word handler
459 return (irq_handler_t)slot->inst2;
460 } else {
461 // lui ra, %hi(handler); jalr ra, %lo(handler)(ra)
462 return (irq_handler_t)(
463 ((slot->inst1 >> 12) << 12) + (uint32_t)((int32_t)slot->inst2 >> 20)
464 );
465 }
466 #endif
467 }
468 #endif
469
irq_remove_handler(uint num,irq_handler_t handler)470 void irq_remove_handler(uint num, irq_handler_t handler) {
471 #if !PICO_NO_RAM_VECTOR_TABLE
472 spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
473 uint32_t save = spin_lock_blocking(lock);
474 irq_handler_t vtable_handler = get_vtable()[VTABLE_FIRST_IRQ + num];
475 if (vtable_handler != __unhandled_user_irq && vtable_handler != handler) {
476 #if !PICO_DISABLE_SHARED_IRQ_HANDLERS
477 if (is_shared_irq_raw_handler(vtable_handler)) {
478 // This is a bit tricky, as an executing IRQ handler doesn't take a lock.
479
480 // First thing to do is to disable the IRQ in question; that takes care of calls from user code.
481 // Note that a irq handler chain is local to our own core, so we don't need to worry about the other core
482 bool was_enabled = pico_irq_is_enabled(num);
483 irq_set_enabled(num, false);
484 __dmb();
485
486 // It is possible we are being called while an IRQ for this chain is already in progress.
487 // The issue we have here is that we must not free a slot that is currently being executed, because
488 // inst3 is still to be executed, and inst3 might get overwritten if the slot is re-used.
489
490 // By disallowing other exceptions from removing an IRQ handler (which seems fair)
491 // we now only have to worry about removing a slot from a chain that is currently executing.
492
493 // Note we expect that the slot we are deleting is the one that is executing.
494 // In particular, bad things happen if the caller were to delete the handler in the chain
495 // before it. This is not an allowed use case though, and I can't imagine anyone wanting to in practice.
496 // Sadly this is not something we can detect.
497
498 uint exception = __get_current_exception();
499 hard_assert(!exception || exception == num + VTABLE_FIRST_IRQ);
500
501 struct irq_handler_chain_slot *prev_slot = NULL;
502 struct irq_handler_chain_slot *existing_vtable_slot = remove_thumb_bit((void *) vtable_handler);
503 struct irq_handler_chain_slot *to_free_slot = existing_vtable_slot;
504 while (handler_from_slot(to_free_slot) != handler) {
505 prev_slot = to_free_slot;
506 if (to_free_slot->link < 0) break;
507 to_free_slot = &irq_handler_chain_slots[to_free_slot->link];
508 }
509 if (handler_from_slot(to_free_slot) == handler) {
510 int8_t next_slot_index = to_free_slot->link;
511 if (next_slot_index >= 0) {
512 // There is another slot in the chain, so copy that over us, so that our inst3 points at something valid
513 // Note this only matters in the exception case anyway, and it that case, we will skip the next handler,
514 // however in that case its IRQ cause should immediately cause re-entry of the IRQ and the only side
515 // effect will be that there was potentially brief out of priority order execution of the handlers
516 struct irq_handler_chain_slot *next_slot = &irq_handler_chain_slots[next_slot_index];
517 #ifndef __riscv
518 to_free_slot->handler = next_slot->handler;
519 #else
520 irq_handler_t handler_of_next_slot = handler_from_slot(next_slot);
521 if (to_free_slot == existing_vtable_slot) {
522 to_free_slot->inst2 = (uint32_t)handler_of_next_slot;
523 } else {
524 to_free_slot->inst1 = make_call_inst1(handler_of_next_slot);
525 to_free_slot->inst2 = make_call_inst2(handler_of_next_slot);
526 }
527 #endif
528 to_free_slot->priority = next_slot->priority;
529 to_free_slot->link = next_slot->link;
530 to_free_slot->inst3 = next_slot->link >= 0 ?
531 make_j_16(&to_free_slot->inst3, resolve_j_16(&next_slot->inst3)) : // b next_>slot->next_slot
532 inst16_return_from_last_slot,
533
534 // add old next slot back to free list
535 next_slot->link = irq_handler_chain_free_slot_head;
536 irq_handler_chain_free_slot_head = next_slot_index;
537 } else {
538 // Slot being removed is at the end of the chain
539 if (!exception) {
540 // case when we're not in exception, we physically unlink now
541 if (prev_slot) {
542 // chain is not empty
543 prev_slot->link = -1;
544 prev_slot->inst3 = inst16_return_from_last_slot;
545 } else {
546 // chain is not empty
547 vtable_handler = __unhandled_user_irq;
548 }
549 // add slot back to free list
550 to_free_slot->link = irq_handler_chain_free_slot_head;
551 irq_handler_chain_free_slot_head = get_slot_index(to_free_slot);
552 } else {
553 // since we are the last slot we know that our inst3 hasn't executed yet, so we change
554 // it to bl to irq_handler_chain_remove_tail which will remove the slot.
555 #ifndef __riscv
556 // NOTE THAT THIS TRASHES PRIORITY AND LINK SINCE THIS IS A 4 BYTE INSTRUCTION
557 // BUT THEY ARE NOT NEEDED NOW
558 insert_bl_32(&to_free_slot->inst3, (void *) irq_handler_chain_remove_tail);
559 #else
560 to_free_slot->inst3 = make_jal_16(&to_free_slot->inst3, (void*) irq_handler_chain_remove_tail);
561 #endif
562 }
563 }
564 } else {
565 assert(false); // not found
566 }
567 irq_set_enabled(num, was_enabled);
568 }
569 #else
570 assert(false); // not found
571 #endif
572 } else {
573 vtable_handler = __unhandled_user_irq;
574 }
575 set_raw_irq_handler_and_unlock(num, vtable_handler, save);
576 #else
577 panic_unsupported();
578 #endif
579 }
580
581 #ifndef __riscv
nvic_ipr0(void)582 static io_rw_32 *nvic_ipr0(void) {
583 return (io_rw_32 *)(PPB_BASE + ARM_CPU_PREFIXED(NVIC_IPR0_OFFSET));
584 }
585 #endif
586
irq_set_priority(uint num,uint8_t hardware_priority)587 void irq_set_priority(uint num, uint8_t hardware_priority) {
588 check_irq_param(num);
589 #ifdef __riscv
590 // SDK priorities are upside down due to Cortex-M influence
591 hardware_priority = (uint8_t)((hardware_priority >> 4) ^ 0xf);
592 // There is no atomic field write operation, so first drop the IRQ to its
593 // lowest priority (safe even if it is in a preemption frame below us) and
594 // then use a set to raise it to the target priority.
595 hazard3_irqarray_clear(RVCSR_MEIPRA_OFFSET, num / 4, 0xfu << (4 * (num % 4)));
596 hazard3_irqarray_set(RVCSR_MEIPRA_OFFSET, num / 4, hardware_priority << (4 * (num % 4)));
597 #else
598 io_rw_32 *p = nvic_ipr0() + (num >> 2);
599 // note that only 32 bit writes are supported
600 *p = (*p & ~(0xffu << (8 * (num & 3u)))) | (((uint32_t) hardware_priority) << (8 * (num & 3u)));
601 #endif
602 }
603
irq_get_priority(uint num)604 uint irq_get_priority(uint num) {
605 check_irq_param(num);
606 #ifdef __riscv
607 uint16_t priority_row = (uint16_t) hazard3_irqarray_read(RVCSR_MEIPRA_OFFSET, num / 4u);
608 uint8_t priority_4bit = (priority_row >> (4 * (num % 4))) & 0xfu;
609 return ((priority_4bit ^ 0xfu) << 4u);
610 #else
611 // note that only 32 bit reads are supported
612 io_rw_32 *p = nvic_ipr0() + (num >> 2);
613 return (uint8_t)(*p >> (8 * (num & 3u)));
614 #endif
615 }
616
617 #if !PICO_DISABLE_SHARED_IRQ_HANDLERS
618 // used by irq_handler_chain.S to remove the last link in a handler chain after it executes
619 // note this must be called only with the last slot in a chain (and during the exception)
irq_add_tail_to_free_list(struct irq_handler_chain_slot * slot)620 void irq_add_tail_to_free_list(struct irq_handler_chain_slot *slot) {
621 irq_handler_t slot_handler = (irq_handler_t) add_thumb_bit(slot);
622 assert(is_shared_irq_raw_handler(slot_handler));
623
624 uint exception = __get_current_exception();
625 assert(exception);
626 spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_IRQ);
627 uint32_t save = spin_lock_blocking(lock);
628 int8_t slot_index = get_slot_index(slot);
629 if (slot_handler == get_vtable()[exception]) {
630 get_vtable()[exception] = __unhandled_user_irq;
631 } else {
632 bool __unused found = false;
633 // need to find who points at the slot and update it
634 for(uint i=0;i<count_of(irq_handler_chain_slots);i++) {
635 if (irq_handler_chain_slots[i].link == slot_index) {
636 irq_handler_chain_slots[i].link = -1;
637 irq_handler_chain_slots[i].inst3 = inst16_return_from_last_slot;
638 found = true;
639 break;
640 }
641 }
642 assert(found);
643 }
644 // add slot to free list
645 slot->link = irq_handler_chain_free_slot_head;
646 irq_handler_chain_free_slot_head = slot_index;
647 spin_unlock(lock, save);
648 }
649 #endif
650
runtime_init_per_core_irq_priorities(void)651 __weak void runtime_init_per_core_irq_priorities(void) {
652 #if PICO_DEFAULT_IRQ_PRIORITY != 0
653 #ifndef __riscv
654 // static_assert(!(NUM_IRQS & 3), ""); // this isn't really required - the reg is still 32 bit
655 uint32_t prio4 = (PICO_DEFAULT_IRQ_PRIORITY & 0xff) * 0x1010101u;
656 io_rw_32 *p = nvic_ipr0();
657 for (uint i = 0; i < (NUM_IRQS + 3) / 4; i++) {
658 *p++ = prio4;
659 }
660 #else
661 for (uint i = 0; i < NUM_IRQS; ++i) {
662 irq_set_priority(i, PICO_DEFAULT_IRQ_PRIORITY);
663 }
664 #endif
665 #endif
666 }
667
get_user_irq_claim_index(uint irq_num)668 static uint get_user_irq_claim_index(uint irq_num) {
669 invalid_params_if(HARDWARE_IRQ, irq_num < FIRST_USER_IRQ || irq_num >= NUM_IRQS);
670 // we count backwards from the last, to match the existing hard coded uses of user IRQs in the SDK which were previously using 31
671 static_assert(NUM_IRQS - FIRST_USER_IRQ <= 8, ""); // we only use a single byte's worth of claim bits today.
672 return NUM_IRQS - irq_num - 1u;
673 }
674
user_irq_claim(uint irq_num)675 void user_irq_claim(uint irq_num) {
676 hw_claim_or_assert(user_irq_claimed_ptr(), get_user_irq_claim_index(irq_num), "User IRQ is already claimed");
677 }
678
user_irq_unclaim(uint irq_num)679 void user_irq_unclaim(uint irq_num) {
680 hw_claim_clear(user_irq_claimed_ptr(), get_user_irq_claim_index(irq_num));
681 }
682
user_irq_claim_unused(bool required)683 int user_irq_claim_unused(bool required) {
684 int bit = hw_claim_unused_from_range(user_irq_claimed_ptr(), required, 0, NUM_USER_IRQS - 1, "No user IRQs are available");
685 if (bit >= 0) bit = (int)NUM_IRQS - bit - 1;
686 return bit;
687 }
688
user_irq_is_claimed(uint irq_num)689 bool user_irq_is_claimed(uint irq_num) {
690 return hw_is_claimed(user_irq_claimed_ptr(), get_user_irq_claim_index(irq_num));
691 }
692
693 #ifdef __riscv
encode_j_instruction(uintptr_t from,uintptr_t to)694 static uint32_t encode_j_instruction(uintptr_t from, uintptr_t to) {
695 intptr_t delta = (intptr_t) (to - from);
696 invalid_params_if(HARDWARE_IRQ, delta & 1);
697 valid_params_if(HARDWARE_IRQ, ((delta >> 21) == 0 || (delta >> 21) == -1)); // range check +- 1 MiB
698 return 0x6fu | riscv_encode_imm_j((uint32_t)delta);
699 }
700
irq_set_riscv_vector_handler(enum riscv_vector_num index,irq_handler_t handler)701 irq_handler_t irq_set_riscv_vector_handler(enum riscv_vector_num index, irq_handler_t handler) {
702 invalid_params_if(HARDWARE_IRQ, index > RISCV_VEC_MACHINE_EXTERNAL_IRQ);
703 irq_handler_t *vtable = get_vtable();
704 valid_params_if(HARDWARE_IRQ, ((uintptr_t)vtable & 0x3) == 0x1); // check we are in vector mode
705 irq_handler_t old = vtable[index];
706 vtable[index] = (irq_handler_t)encode_j_instruction((uintptr_t)&vtable[index], (uintptr_t)handler);
707 return old;
708 }
709 #endif
710