1 /*
2 * Copyright (c) 2021-2025 Espressif Systems (Shanghai) Co., Ltd.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/irq.h>
9 #include <stdint.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <stdbool.h>
13 #include <string.h>
14 #include <soc.h>
15 #include <zephyr/drivers/interrupt_controller/intc_esp32.h>
16 #include <esp_memory_utils.h>
17 #include <esp_attr.h>
18 #include <esp_cpu.h>
19 #include <esp_rom_sys.h>
20 #include <esp_private/rtc_ctrl.h>
21 #include <limits.h>
22 #include <assert.h>
23 #include <soc/soc.h>
24
25 #include <zephyr/logging/log.h>
26 LOG_MODULE_REGISTER(intc_esp32, CONFIG_LOG_DEFAULT_LEVEL);
27
28 #define ETS_INTERNAL_TIMER0_INTR_NO 6
29 #define ETS_INTERNAL_TIMER1_INTR_NO 15
30 #define ETS_INTERNAL_TIMER2_INTR_NO 16
31 #define ETS_INTERNAL_SW0_INTR_NO 7
32 #define ETS_INTERNAL_SW1_INTR_NO 29
33 #define ETS_INTERNAL_PROFILING_INTR_NO 11
34
35 #define VECDESC_FL_RESERVED (1 << 0)
36 #define VECDESC_FL_INIRAM (1 << 1)
37 #define VECDESC_FL_SHARED (1 << 2)
38 #define VECDESC_FL_NONSHARED (1 << 3)
39
40 /*
41 * Define this to debug the choices made when allocating the interrupt. This leads to much debugging
42 * output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
43 * being triggered, that is why it is separate from the normal LOG* scheme.
44 */
45 #ifdef CONFIG_INTC_ESP32_DECISIONS_LOG
46 # define INTC_LOG(...) LOG_INF(__VA_ARGS__)
47 #else
48 # define INTC_LOG(...) do {} while (false)
49 #endif
50
51 /* Typedef for C-callable interrupt handler function */
52 typedef void (*intc_dyn_handler_t)(const void *);
53
54 /* Linked list of vector descriptions, sorted by cpu.intno value */
55 static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */
56
/* This bitmask has a 1 if the int should be disabled when the flash is disabled. */
58 static uint32_t non_iram_int_mask[CONFIG_MP_MAX_NUM_CPUS];
59 /* This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable. */
60 static uint32_t non_iram_int_disabled[CONFIG_MP_MAX_NUM_CPUS];
61 static bool non_iram_int_disabled_flag[CONFIG_MP_MAX_NUM_CPUS];
62
63 /*
64 * Inserts an item into vector_desc list so that the list is sorted
65 * with an incrementing cpu.intno value.
66 */
insert_vector_desc(struct vector_desc_t * to_insert)67 static void insert_vector_desc(struct vector_desc_t *to_insert)
68 {
69 struct vector_desc_t *vd = vector_desc_head;
70 struct vector_desc_t *prev = NULL;
71
72 while (vd != NULL) {
73 if (vd->cpu > to_insert->cpu) {
74 break;
75 }
76 if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) {
77 break;
78 }
79 prev = vd;
80 vd = vd->next;
81 }
82 if ((vector_desc_head == NULL) || (prev == NULL)) {
83 /* First item */
84 to_insert->next = vd;
85 vector_desc_head = to_insert;
86 } else {
87 prev->next = to_insert;
88 to_insert->next = vd;
89 }
90 }
91
92 /* Returns a vector_desc entry for an intno/cpu, or NULL if none exists. */
find_desc_for_int(int intno,int cpu)93 static struct vector_desc_t *find_desc_for_int(int intno, int cpu)
94 {
95 struct vector_desc_t *vd = vector_desc_head;
96
97 while (vd != NULL) {
98 if (vd->cpu == cpu && vd->intno == intno) {
99 break;
100 }
101 vd = vd->next;
102 }
103 return vd;
104 }
105
106 /*
107 * Returns a vector_desc entry for an intno/cpu.
108 * Either returns a preexisting one or allocates a new one and inserts
109 * it into the list. Returns NULL on malloc fail.
110 */
get_desc_for_int(int intno,int cpu)111 static struct vector_desc_t *get_desc_for_int(int intno, int cpu)
112 {
113 struct vector_desc_t *vd = find_desc_for_int(intno, cpu);
114
115 if (vd == NULL) {
116 struct vector_desc_t *newvd = k_malloc(sizeof(struct vector_desc_t));
117
118 if (newvd == NULL) {
119 return NULL;
120 }
121 memset(newvd, 0, sizeof(struct vector_desc_t));
122 newvd->intno = intno;
123 newvd->cpu = cpu;
124 insert_vector_desc(newvd);
125 return newvd;
126 } else {
127 return vd;
128 }
129 }
130
131 /*
132 * Returns a vector_desc entry for an source, the cpu parameter is used
133 * to tell GPIO_INT and GPIO_NMI from different CPUs
134 */
find_desc_for_source(int source,int cpu)135 static struct vector_desc_t *find_desc_for_source(int source, int cpu)
136 {
137 struct vector_desc_t *vd = vector_desc_head;
138
139 while (vd != NULL) {
140 if (!(vd->flags & VECDESC_FL_SHARED)) {
141 if (vd->source == source && cpu == vd->cpu) {
142 break;
143 }
144 } else if (vd->cpu == cpu) {
145 /* check only shared vds for the correct cpu, otherwise skip */
146 bool found = false;
147 struct shared_vector_desc_t *svd = vd->shared_vec_info;
148
149 assert(svd != NULL);
150 while (svd) {
151 if (svd->source == source) {
152 found = true;
153 break;
154 }
155 svd = svd->next;
156 }
157 if (found) {
158 break;
159 }
160 }
161 vd = vd->next;
162 }
163 return vd;
164 }
165
esp_intr_mark_shared(int intno,int cpu,bool is_int_ram)166 int esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
167 {
168 if (intno >= SOC_CPU_INTR_NUM) {
169 return -EINVAL;
170 }
171 if (cpu >= arch_num_cpus()) {
172 return -EINVAL;
173 }
174
175 unsigned int key = irq_lock();
176 struct vector_desc_t *vd = get_desc_for_int(intno, cpu);
177
178 if (vd == NULL) {
179 irq_unlock(key);
180 return -ENOMEM;
181 }
182 vd->flags = VECDESC_FL_SHARED;
183 if (is_int_ram) {
184 vd->flags |= VECDESC_FL_INIRAM;
185 }
186 irq_unlock(key);
187
188 return 0;
189 }
190
esp_intr_reserve(int intno,int cpu)191 int esp_intr_reserve(int intno, int cpu)
192 {
193 if (intno >= SOC_CPU_INTR_NUM) {
194 return -EINVAL;
195 }
196 if (cpu >= arch_num_cpus()) {
197 return -EINVAL;
198 }
199
200 unsigned int key = irq_lock();
201 struct vector_desc_t *vd = get_desc_for_int(intno, cpu);
202
203 if (vd == NULL) {
204 irq_unlock(key);
205 return -ENOMEM;
206 }
207 vd->flags = VECDESC_FL_RESERVED;
208 irq_unlock(key);
209
210 return 0;
211 }
212
213 /* Returns true if handler for interrupt is not the default unhandled interrupt handler */
intr_has_handler(int intr,int cpu)214 static bool intr_has_handler(int intr, int cpu)
215 {
216 bool r;
217
218 r = _sw_isr_table[intr * CONFIG_MP_MAX_NUM_CPUS + cpu].isr != z_irq_spurious;
219
220 return r;
221 }
222
/*
 * Check whether vector descriptor @vd can host an allocation with the given
 * @flags on @cpu. @force is the forced interrupt number (or -1); when an
 * interrupt is forced, "special purpose" interrupts are also accepted.
 * Returns true when the descriptor is usable for this request.
 */
static bool is_vect_desc_usable(struct vector_desc_t *vd, int flags, int cpu, int force)
{
	/* Check if interrupt is not reserved by design */
	int x = vd->intno;
	esp_cpu_intr_desc_t intr_desc;
	esp_cpu_intr_get_desc(cpu, x, &intr_desc);

	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
		INTC_LOG("....Unusable: reserved");
		return false;
	}
	/* Special-purpose interrupts are only usable when explicitly forced. */
	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
		INTC_LOG("....Unusable: special-purpose int");
		return false;
	}

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
	/*
	 * Without a flexible interrupt controller the line's priority and
	 * trigger type are fixed in hardware, so they must match the request.
	 */
	/* Check if the interrupt level is acceptable */
	if (!(flags & (1 << intr_desc.priority))) {
		INTC_LOG("....Unusable: incompatible level");
		return false;
	}
	/* check if edge/level type matches what we want */
	if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
	    (((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
		INTC_LOG("....Unusable: incompatible trigger type");
		return false;
	}
#endif

	/* check if interrupt is reserved at runtime */
	if (vd->flags & VECDESC_FL_RESERVED) {
		INTC_LOG("....Unusable: reserved at runtime.");
		return false;
	}

	/* Ints can't be both shared and non-shared. */
	assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
	/* check if interrupt already is in use by a non-shared interrupt */
	if (vd->flags & VECDESC_FL_NONSHARED) {
		INTC_LOG("....Unusable: already in (non-shared) use.");
		return false;
	}
	/* check shared interrupt flags */
	if (vd->flags & VECDESC_FL_SHARED) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
			bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
			/*
			 * Bail out if int is shared, but iram property
			 * doesn't match what we want.
			 */
			if ((vd->flags & VECDESC_FL_SHARED) &&
			    (desc_in_iram_flag != in_iram_flag)) {
				INTC_LOG("....Unusable: shared but iram prop doesn't match");
				return false;
			}
		} else {
			/*
			 * We need an unshared IRQ; can't use shared ones;
			 * bail out if this is shared.
			 */
			INTC_LOG("...Unusable: int is shared, we need non-shared.");
			return false;
		}
	} else if (intr_has_handler(x, cpu)) {
		/* Not tracked as shared, but a handler is already connected. */
		INTC_LOG("....Unusable: already allocated");
		return false;
	}

	return true;
}
295
/*
 * Locate a free interrupt compatible with the flags given.
 * The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
 * When a CPU is forced, the INTDESC_SPECIAL marked interrupts are also accepted.
 *
 * Returns the chosen interrupt number, or -1 when nothing compatible exists.
 * Selection prefers (in order): an existing descriptor for this source, the
 * forced interrupt, then the lowest-priority usable line (for shared requests,
 * the shared line with the fewest attached ISRs).
 */
static int get_available_int(int flags, int cpu, int force, int source)
{
	int x;
	int best = -1;
	/* 9 is worse than any real priority, so the first candidate always wins. */
	int best_level = 9;
	int best_shared_ct = INT_MAX;
	/* Default vector desc, for vectors not in the linked list */
	struct vector_desc_t empty_vect_desc;

	memset(&empty_vect_desc, 0, sizeof(struct vector_desc_t));

	/* Level defaults to any low/med interrupt */
	if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
		flags |= ESP_INTR_FLAG_LOWMED;
	}

	INTC_LOG("%s: try to find existing. Cpu: %d, Source: %d", __func__, cpu, source);
	struct vector_desc_t *vd = find_desc_for_source(source, cpu);

	if (vd) {
		/* if existing vd found, don't need to search any more. */
		INTC_LOG("%s: existing vd found. intno: %d", __func__, vd->intno);
		if (force != -1 && force != vd->intno) {
			INTC_LOG("%s: intr forced but not match existing. "
				 "existing intno: %d, force: %d", __func__, vd->intno, force);
		} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			INTC_LOG("%s: existing vd invalid.", __func__);
		} else {
			best = vd->intno;
		}
		return best;
	}
	if (force != -1) {
		INTC_LOG("%s: try to find force. "
			 "Cpu: %d, Source: %d, Force: %d", __func__, cpu, source, force);
		/* if force assigned, don't need to search any more. */
		vd = find_desc_for_int(force, cpu);
		if (vd == NULL) {
			/* if existing vd not found, just check the default state for the intr. */
			empty_vect_desc.intno = force;
			vd = &empty_vect_desc;
		}
		if (is_vect_desc_usable(vd, flags, cpu, force)) {
			best = vd->intno;
		} else {
			INTC_LOG("%s: forced vd invalid.", __func__);
		}
		return best;
	}

	INTC_LOG("%s: start looking. Current cpu: %d", __func__, cpu);
	/* No allocated handlers as well as forced intr, iterate over the 32 possible interrupts */
	for (x = 0; x < SOC_CPU_INTR_NUM; x++) {
		/* Grab the vector_desc for this vector. */
		vd = find_desc_for_int(x, cpu);
		if (vd == NULL) {
			/* Not in the list yet: evaluate it with default (empty) state. */
			empty_vect_desc.intno = x;
			vd = &empty_vect_desc;
		}

		esp_cpu_intr_desc_t intr_desc;
		esp_cpu_intr_get_desc(cpu, x, &intr_desc);

		INTC_LOG("Int %d reserved %d level %d %s hasIsr %d",
			 x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
			 intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL ? "LEVEL" : "EDGE",
			 intr_has_handler(x, cpu));

		if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			continue;
		}

		if (flags & ESP_INTR_FLAG_SHARED) {
			/* We're allocating a shared int. */

			/* See if int already is used as a shared interrupt. */
			if (vd->flags & VECDESC_FL_SHARED) {
				/*
				 * We can use this already-marked-as-shared interrupt. Count the
				 * already attached isrs in order to see how useful it is.
				 */
				int no = 0;
				struct shared_vector_desc_t *svdesc = vd->shared_vec_info;

				while (svdesc != NULL) {
					no++;
					svdesc = svdesc->next;
				}
				if (no < best_shared_ct ||
				    best_level > intr_desc.priority) {
					/*
					 * Seems like this shared vector is both okay and has
					 * the least amount of ISRs already attached to it.
					 */
					best = x;
					best_shared_ct = no;
					best_level = intr_desc.priority;
					INTC_LOG("...int %d more usable as a shared int: "
						 "has %d existing vectors", x, no);
				} else {
					INTC_LOG("...worse than int %d", best);
				}
			} else {
				if (best == -1) {
					/*
					 * We haven't found a feasible shared interrupt yet.
					 * This one is still free and usable, even if not
					 * marked as shared.
					 * Remember it in case we don't find any other shared
					 * interrupt that qualifies.
					 */
					if (best_level > intr_desc.priority) {
						best = x;
						best_level = intr_desc.priority;
						INTC_LOG("...int %d usable as new shared int", x);
					}
				} else {
					INTC_LOG("...already have a shared int");
				}
			}
		} else {
			/*
			 * Seems this interrupt is feasible. Select it and break out of the loop
			 * No need to search further.
			 */
			if (best_level > intr_desc.priority) {
				best = x;
				best_level = intr_desc.priority;
			} else {
				INTC_LOG("...worse than int %d", best);
			}
		}
	}
	INTC_LOG("%s: using int %d", __func__, best);

	/*
	 * By now we have looked at all potential interrupts and
	 * hopefully have selected the best one in best.
	 */
	return best;
}
442
443 /* Common shared isr handler. Chain-call all ISRs. */
shared_intr_isr(void * arg)444 static void IRAM_ATTR shared_intr_isr(void *arg)
445 {
446 struct vector_desc_t *vd = (struct vector_desc_t *)arg;
447 struct shared_vector_desc_t *sh_vec = vd->shared_vec_info;
448
449 unsigned int key = irq_lock();
450 while (sh_vec) {
451 if (!sh_vec->disabled) {
452 if (!(sh_vec->statusreg) || (*sh_vec->statusreg & sh_vec->statusmask)) {
453 sh_vec->isr(sh_vec->arg);
454 }
455 }
456 sh_vec = sh_vec->next;
457 }
458 irq_unlock(key);
459 }
460
/*
 * Allocate an interrupt for @source with the given @flags, optionally
 * attaching an interrupt-status register/mask pair that the shared-interrupt
 * dispatcher checks before calling @handler. On success, *ret_handle (when
 * non-NULL) receives a handle the caller later frees with esp_intr_free().
 * Returns 0 on success, -EINVAL for inconsistent arguments, -ENOMEM on
 * allocation failure, or -ENODEV when no compatible interrupt line exists.
 */
int esp_intr_alloc_intrstatus(int source,
			      int flags,
			      uint32_t intrstatusreg,
			      uint32_t intrstatusmask,
			      intr_handler_t handler,
			      void *arg,
			      intr_handle_t *ret_handle)
{
	intr_handle_data_t *ret = NULL;
	int force = -1;

	INTC_LOG("%s (cpu %d): checking args", __func__, esp_cpu_get_core_id());
	/* Shared interrupts should be level-triggered. */
	if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
		return -EINVAL;
	}
	/* You can't set an handler / arg for a non-C-callable interrupt. */
	if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
		return -EINVAL;
	}
	/* Shared ints should have handler and non-processor-local source */
	if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
		return -EINVAL;
	}
	/* Statusreg should have a mask */
	if (intrstatusreg && !intrstatusmask) {
		return -EINVAL;
	}
	/*
	 * If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
	 * If we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
	 * we need to make sure the interrupt is connected to the CPU0.
	 * CPU1 does not have access to the RTC fast memory through this region.
	 */
	if ((flags & ESP_INTR_FLAG_IRAM) && handler &&
	    !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
		return -EINVAL;
	}

	/*
	 * Default to prio 1 for shared interrupts.
	 * Default to prio 1, 2 or 3 for non-shared interrupts.
	 */
	if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			flags |= ESP_INTR_FLAG_LEVEL1;
		} else {
			flags |= ESP_INTR_FLAG_LOWMED;
		}
	}
	INTC_LOG("%s (cpu %d): Args okay."
		 "Resulting flags 0x%X", __func__, esp_cpu_get_core_id(), flags);

	/*
	 * Check 'special' interrupt sources. These are tied to one specific
	 * interrupt, so we have to force get_available_int to only look at that.
	 */
	switch (source) {
	case ETS_INTERNAL_TIMER0_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER0_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER1_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER1_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER2_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER2_INTR_NO;
		break;
	case ETS_INTERNAL_SW0_INTR_SOURCE:
		force = ETS_INTERNAL_SW0_INTR_NO;
		break;
	case ETS_INTERNAL_SW1_INTR_SOURCE:
		force = ETS_INTERNAL_SW1_INTR_NO;
		break;
	case ETS_INTERNAL_PROFILING_INTR_SOURCE:
		force = ETS_INTERNAL_PROFILING_INTR_NO;
		break;
	default:
		break;
	}

	/* Allocate a return handle. If we end up not needing it, we'll free it later on. */
	ret = k_malloc(sizeof(struct intr_handle_data_t));
	if (ret == NULL) {
		return -ENOMEM;
	}

	unsigned int key = irq_lock();
	int cpu = esp_cpu_get_core_id();
	/* See if we can find an interrupt that matches the flags. */
	int intr = get_available_int(flags, cpu, force, source);

	if (intr == -1) {
		/* None found. Bail out. */
		irq_unlock(key);
		k_free(ret);
		return -ENODEV;
	}
	/* Get an int vector desc for int. */
	struct vector_desc_t *vd = get_desc_for_int(intr, cpu);

	if (vd == NULL) {
		irq_unlock(key);
		k_free(ret);
		return -ENOMEM;
	}

	/* Allocate that int! */
	if (flags & ESP_INTR_FLAG_SHARED) {
		/* Populate vector entry and add to linked list. */
		struct shared_vector_desc_t *sv = k_malloc(sizeof(struct shared_vector_desc_t));

		if (sv == NULL) {
			irq_unlock(key);
			k_free(ret);
			return -ENOMEM;
		}
		memset(sv, 0, sizeof(struct shared_vector_desc_t));
		sv->statusreg = (uint32_t *)intrstatusreg;
		sv->statusmask = intrstatusmask;
		sv->isr = handler;
		sv->arg = arg;
		/* Prepend to the vector's shared-handler list. */
		sv->next = vd->shared_vec_info;
		sv->source = source;
		sv->disabled = 0;
		vd->shared_vec_info = sv;
		vd->flags |= VECDESC_FL_SHARED;

		/* Disable interrupt to avoid assert at IRQ install */
		irq_disable(intr);

		/* (Re-)set shared isr handler to new value. */
		irq_connect_dynamic(intr, 0, (intc_dyn_handler_t)shared_intr_isr, vd, 0);
	} else {
		/* Mark as unusable for other interrupt sources. This is ours now! */
		vd->flags = VECDESC_FL_NONSHARED;
		if (handler) {
			irq_disable(intr);
			irq_connect_dynamic(intr, 0, (intc_dyn_handler_t)handler, arg, 0);
		}
		if (flags & ESP_INTR_FLAG_EDGE) {
			/* Clear any stale pending edge before use. */
			esp_cpu_intr_edge_ack(intr);
		}
		vd->source = source;
	}
	/* Track whether this int must stay enabled while flash cache is off. */
	if (flags & ESP_INTR_FLAG_IRAM) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] &= ~(1 << intr);
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] |= (1 << intr);
	}
	if (source >= 0) {
		/* Route the peripheral source through the interrupt matrix. */
		esp_rom_route_intr_matrix(cpu, source, intr);
	}

	/* Fill return handle data. */
	ret->vector_desc = vd;
	ret->shared_vector_desc = vd->shared_vec_info;

	/* Enable int at CPU-level; */
	irq_enable(intr);

	/*
	 * If interrupt has to be started disabled, do that now; ints won't be enabled for
	 * real until the end of the critical section.
	 */
	if (flags & ESP_INTR_FLAG_INTRDISABLED) {
		esp_intr_disable(ret);
	}

#if SOC_CPU_HAS_FLEXIBLE_INTC
	/* Extract the level from the interrupt passed flags */
	int level = esp_intr_flags_to_level(flags);
	esp_cpu_intr_set_priority(intr, level);

	if (flags & ESP_INTR_FLAG_EDGE) {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
	} else {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
	}
#endif

#if SOC_INT_PLIC_SUPPORTED
	/* Make sure the interrupt is not delegated to user mode (IDF uses machine mode only) */
	RV_CLEAR_CSR(mideleg, BIT(intr));
#endif

	irq_unlock(key);

	/* Fill return handle if needed, otherwise free handle. */
	if (ret_handle != NULL) {
		*ret_handle = ret;
	} else {
		k_free(ret);
	}

	LOG_DBG("Connected src %d to int %d (cpu %d)", source, intr, cpu);

	return 0;
}
661
/* Convenience wrapper: allocate an interrupt with no status register/mask. */
int esp_intr_alloc(int source,
		   int flags,
		   intr_handler_t handler,
		   void *arg,
		   intr_handle_t *ret_handle)
{
	/*
	 * As an optimization, we can create a table with the possible interrupt status
	 * registers and masks for every single source there is. We can then add code here to
	 * look up an applicable value and pass that to the esp_intr_alloc_intrstatus function.
	 */
	return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
675
esp_intr_set_in_iram(intr_handle_t handle,bool is_in_iram)676 int IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
677 {
678 if (!handle) {
679 return -EINVAL;
680 }
681 struct vector_desc_t *vd = handle->vector_desc;
682
683 if (vd->flags & VECDESC_FL_SHARED) {
684 return -EINVAL;
685 }
686 unsigned int key = irq_lock();
687 uint32_t mask = (1 << vd->intno);
688
689 if (is_in_iram) {
690 vd->flags |= VECDESC_FL_INIRAM;
691 non_iram_int_mask[vd->cpu] &= ~mask;
692 } else {
693 vd->flags &= ~VECDESC_FL_INIRAM;
694 non_iram_int_mask[vd->cpu] |= mask;
695 }
696 irq_unlock(key);
697 return 0;
698 }
699
/*
 * Release the interrupt allocation behind @handle. For shared interrupts the
 * matching sub-vector is unlinked and freed; the line itself is torn down
 * only when it was non-shared or no shared handlers remain. The handle is
 * freed in all success paths. Returns 0, or -EINVAL for a NULL handle.
 */
int esp_intr_free(intr_handle_t handle)
{
	bool free_shared_vector = false;

	if (!handle) {
		return -EINVAL;
	}

	unsigned int key = irq_lock();
	esp_intr_disable(handle);
	if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
		/* Find and kill the shared int */
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		struct shared_vector_desc_t *prevsvd = NULL;

		assert(svd); /* should be something in there for a shared int */
		while (svd != NULL) {
			if (svd == handle->shared_vector_desc) {
				/* Found it. Now kill it. */
				if (prevsvd) {
					prevsvd->next = svd->next;
				} else {
					handle->vector_desc->shared_vec_info = svd->next;
				}
				k_free(svd);
				break;
			}
			prevsvd = svd;
			svd = svd->next;
		}
		/* If nothing left, disable interrupt. */
		if (handle->vector_desc->shared_vec_info == NULL) {
			free_shared_vector = true;
		}
		/*
		 * NOTE(review): after the loop, svd is non-NULL when the entry was
		 * found (and freed — only the pointer value is used here, never
		 * dereferenced). The string ordering below looks inverted relative
		 * to that; confirm against upstream intent.
		 */
		INTC_LOG("%s: Deleting shared int: %s. "
			 "Shared int is %s", __func__, svd ? "not found or last one" : "deleted",
			 free_shared_vector ? "empty now." : "still in use");
	}

	if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
		INTC_LOG("%s: Disabling int, killing handler", __func__);

		/* Disable interrupt to avoid assert at IRQ install */
		irq_disable(handle->vector_desc->intno);

		/* Reset IRQ handler */
		irq_connect_dynamic(handle->vector_desc->intno, 0,
				    (intc_dyn_handler_t)z_irq_spurious,
				    (void *)((int)handle->vector_desc->intno), 0);
		/*
		 * Theoretically, we could free the vector_desc... not sure if that's worth the
		 * few bytes of memory we save.(We can also not use the same exit path for empty
		 * shared ints anymore if we delete the desc.) For now, just mark it as free.
		 */
		handle->vector_desc->flags &=
			~(VECDESC_FL_NONSHARED | VECDESC_FL_RESERVED | VECDESC_FL_SHARED);
		handle->vector_desc->source = ETS_INTERNAL_UNUSED_INTR_SOURCE;
		/* Also kill non_iram mask bit. */
		non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
	}
	irq_unlock(key);
	k_free(handle);
	return 0;
}
764
/* Return the CPU interrupt number backing this allocation. */
int esp_intr_get_intno(intr_handle_t handle)
{
	const struct vector_desc_t *vd = handle->vector_desc;

	return vd->intno;
}
769
/* Return the CPU (core id) this allocation is bound to. */
int esp_intr_get_cpu(intr_handle_t handle)
{
	const struct vector_desc_t *vd = handle->vector_desc;

	return vd->cpu;
}
774
/**
 * Interrupt disabling strategy:
 * If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a
 * non-connected interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we
 * disable it via the CPU's interrupt-enable register instead.
 * The muxing approach allows us to disable an int belonging to the other core, and it also
 * allows disabling shared interrupts.
 */
782
/*
 * Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29
 * causes the interrupt to effectively be disabled.
 */
787 #define INT_MUX_DISABLED_INTNO 6
788
esp_intr_enable(intr_handle_t handle)789 int IRAM_ATTR esp_intr_enable(intr_handle_t handle)
790 {
791 if (!handle) {
792 return -EINVAL;
793 }
794 unsigned int key = irq_lock();
795 int source;
796
797 if (handle->shared_vector_desc) {
798 handle->shared_vector_desc->disabled = 0;
799 source = handle->shared_vector_desc->source;
800 } else {
801 source = handle->vector_desc->source;
802 }
803 if (source >= 0) {
804 /* Disabled using int matrix; re-connect to enable */
805 esp_rom_route_intr_matrix(handle->vector_desc->cpu, source,
806 handle->vector_desc->intno);
807 } else {
808 /* Re-enable using cpu int ena reg */
809 if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
810 irq_unlock(key);
811 return -EINVAL; /* Can only enable these ints on this cpu */
812 }
813 irq_enable(handle->vector_desc->intno);
814 }
815 irq_unlock(key);
816 return 0;
817 }
818
esp_intr_disable(intr_handle_t handle)819 int IRAM_ATTR esp_intr_disable(intr_handle_t handle)
820 {
821 if (!handle) {
822 return -EINVAL;
823 }
824 unsigned int key = irq_lock();
825 int source;
826 bool disabled = 1;
827
828 if (handle->shared_vector_desc) {
829 handle->shared_vector_desc->disabled = 1;
830 source = handle->shared_vector_desc->source;
831
832 struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
833
834 assert(svd != NULL);
835 while (svd) {
836 if (svd->source == source && svd->disabled == 0) {
837 disabled = 0;
838 break;
839 }
840 svd = svd->next;
841 }
842 } else {
843 source = handle->vector_desc->source;
844 }
845
846 if (source >= 0) {
847 if (disabled) {
848 /* Disable using int matrix */
849 esp_rom_route_intr_matrix(handle->vector_desc->cpu, source,
850 INT_MUX_DISABLED_INTNO);
851 }
852 } else {
853 /* Disable using per-cpu regs */
854 if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
855 irq_unlock(key);
856 return -EINVAL; /* Can only enable these ints on this cpu */
857 }
858 irq_disable(handle->vector_desc->intno);
859 }
860 irq_unlock(key);
861 return 0;
862 }
863
esp_intr_noniram_disable(void)864 void IRAM_ATTR esp_intr_noniram_disable(void)
865 {
866 unsigned int key = irq_lock();
867 int oldint;
868 int cpu = esp_cpu_get_core_id();
869 int non_iram_ints = non_iram_int_mask[cpu];
870
871 if (non_iram_int_disabled_flag[cpu]) {
872 abort();
873 }
874 non_iram_int_disabled_flag[cpu] = true;
875 oldint = esp_cpu_intr_get_enabled_mask();
876 esp_cpu_intr_disable(non_iram_ints);
877 rtc_isr_noniram_disable(cpu);
878 /* Save which ints we did disable */
879 non_iram_int_disabled[cpu] = oldint & non_iram_ints;
880 irq_unlock(key);
881 }
882
esp_intr_noniram_enable(void)883 void IRAM_ATTR esp_intr_noniram_enable(void)
884 {
885 unsigned int key = irq_lock();
886 int cpu = esp_cpu_get_core_id();
887 int non_iram_ints = non_iram_int_disabled[cpu];
888
889 if (!non_iram_int_disabled_flag[cpu]) {
890 abort();
891 }
892 non_iram_int_disabled_flag[cpu] = false;
893 esp_cpu_intr_enable(non_iram_ints);
894 rtc_isr_noniram_enable(cpu);
895 irq_unlock(key);
896 }
897
898 #if defined(CONFIG_RISCV)
899 /*
900 * Functions below are implemented to keep consistency with current
901 * Xtensa chips API behavior. When accessing Zephyr's API
902 * directly, the CPU IRQs can be enabled or disabled directly. This
903 * is mostly used to control lines that are not muxed, thus bypass the
904 * interrupt matrix. For RISCV, these functions are not expected to
905 * be used via user API, as peripherals are all routed through INTMUX
906 * and shared interrupts require managing sources state.
907 */
/* Enable a single CPU interrupt line. */
void arch_irq_enable(unsigned int irq)
{
	/* Unsigned constant: 1 << 31 would be signed-overflow UB for irq == 31. */
	esp_cpu_intr_enable(1U << irq);
}
912
/* Disable a single CPU interrupt line. */
void arch_irq_disable(unsigned int irq)
{
	/* Unsigned constant: 1 << 31 would be signed-overflow UB for irq == 31. */
	esp_cpu_intr_disable(1U << irq);
}
917
/* Return 1 if the given CPU interrupt line is currently enabled, else 0. */
int arch_irq_is_enabled(unsigned int irq)
{
	/* Unsigned constant: 1 << 31 would be signed-overflow UB for irq == 31. */
	return !!(esp_cpu_intr_get_enabled_mask() & (1U << irq));
}
922 #endif
923