/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <esp_types.h>
#include <limits.h>
#include <assert.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_heap_caps.h"
#include "esp_memory_utils.h"
#include "esp_intr_alloc.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_private/rtc_ctrl.h"

#if !CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
#endif

static const char* TAG = "intr_alloc";

#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11

/*
Define this to debug the choices made when allocating an interrupt. This produces a lot of debug
output within a critical region, which can cause odd side effects, e.g. triggering the interrupt
watchdog; that is why it is kept separate from the normal LOG* scheme.
*/
// #define DEBUG_INT_ALLOC_DECISIONS

#ifdef DEBUG_INT_ALLOC_DECISIONS
# define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__)
#else
# define ALCHLOG(...) do {} while (0)
#endif

typedef struct shared_vector_desc_t shared_vector_desc_t;
typedef struct vector_desc_t vector_desc_t;

struct shared_vector_desc_t {
    int disabled: 1;
    int source: 8;
    volatile uint32_t *statusreg;
    uint32_t statusmask;
    intr_handler_t isr;
    void *arg;
    shared_vector_desc_t *next;
};

#define VECDESC_FL_RESERVED     (1<<0)
#define VECDESC_FL_INIRAM       (1<<1)
#define VECDESC_FL_SHARED       (1<<2)
#define VECDESC_FL_NONSHARED    (1<<3)

//Pack using bitfields for better memory use
struct vector_desc_t {
    int flags: 16;                          //OR of VECDESC_FL_* defines
    unsigned int cpu: 1;
    unsigned int intno: 5;
    int source: 8;                          //Interrupt mux source, used when not shared
    shared_vector_desc_t *shared_vec_info;  //used when VECDESC_FL_SHARED
    vector_desc_t *next;
};

struct intr_handle_data_t {
    vector_desc_t *vector_desc;
    shared_vector_desc_t *shared_vector_desc;
};

typedef struct non_shared_isr_arg_t non_shared_isr_arg_t;

struct non_shared_isr_arg_t {
    intr_handler_t isr;
    void *isr_arg;
    int source;
};

static esp_err_t intr_free_for_current_cpu(intr_handle_t handle);

//Linked list of vector descriptions, sorted by cpu.intno value
static vector_desc_t *vector_desc_head = NULL;

//This bitmask has a 1 for every interrupt that should be disabled when the flash is disabled.
static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];

//This bitmask has a 1 for every interrupt that was disabled using esp_intr_noniram_disable.
static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];

static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;

//Inserts an item into the vector_desc list so that the list is sorted
//with an incrementing cpu.intno value.
static void insert_vector_desc(vector_desc_t *to_insert)
{
    vector_desc_t *vd = vector_desc_head;
    vector_desc_t *prev = NULL;
    while (vd != NULL) {
        if (vd->cpu > to_insert->cpu) break;
        if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
        prev = vd;
        vd = vd->next;
    }
    if (prev == NULL) {
        //First item (empty list, or insertion before the current head)
        to_insert->next = vd;
        vector_desc_head = to_insert;
    } else {
        prev->next = to_insert;
        to_insert->next = vd;
    }
}

//Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
static vector_desc_t *find_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd = vector_desc_head;
    while (vd != NULL) {
        if (vd->cpu == cpu && vd->intno == intno) {
            break;
        }
        vd = vd->next;
    }
    return vd;
}

//Returns a vector_desc entry for an intno/cpu.
//Either returns a preexisting one or allocates a new one and inserts
//it into the list. Returns NULL on malloc fail.
static vector_desc_t *get_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd = find_desc_for_int(intno, cpu);
    if (vd == NULL) {
        vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (newvd == NULL) {
            return NULL;
        }
        memset(newvd, 0, sizeof(vector_desc_t));
        newvd->intno = intno;
        newvd->cpu = cpu;
        insert_vector_desc(newvd);
        return newvd;
    } else {
        return vd;
    }
}

//Returns a vector_desc entry for a source. The cpu parameter is used to distinguish sources
//that exist separately per CPU, such as GPIO_INT and GPIO_NMI.
static vector_desc_t *find_desc_for_source(int source, int cpu)
{
    vector_desc_t *vd = vector_desc_head;
    while (vd != NULL) {
        if (!(vd->flags & VECDESC_FL_SHARED)) {
            if (vd->source == source && cpu == vd->cpu) {
                break;
            }
        } else if (vd->cpu == cpu) {
            //Check only shared vds for the correct cpu; otherwise skip
            bool found = false;
            shared_vector_desc_t *svd = vd->shared_vec_info;
            assert(svd != NULL);
            while (svd) {
                if (svd->source == source) {
                    found = true;
                    break;
                }
                svd = svd->next;
            }
            if (found) {
                break;
            }
        }
        vd = vd->next;
    }
    return vd;
}

esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
    if (intno > 31) {
        return ESP_ERR_INVALID_ARG;
    }
    if (cpu >= SOC_CPU_CORES_NUM) {
        return ESP_ERR_INVALID_ARG;
    }

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd = get_desc_for_int(intno, cpu);
    if (vd == NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags = VECDESC_FL_SHARED;
    if (is_int_ram) {
        vd->flags |= VECDESC_FL_INIRAM;
    }
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}

esp_err_t esp_intr_reserve(int intno, int cpu)
{
    if (intno > 31) {
        return ESP_ERR_INVALID_ARG;
    }
    if (cpu >= SOC_CPU_CORES_NUM) {
        return ESP_ERR_INVALID_ARG;
    }

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd = get_desc_for_int(intno, cpu);
    if (vd == NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags = VECDESC_FL_RESERVED;
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}

static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force)
{
    //Check if the interrupt is not reserved by design
    int x = vd->intno;
    esp_cpu_intr_desc_t intr_desc;
    esp_cpu_intr_get_desc(cpu, x, &intr_desc);

    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
        ALCHLOG("....Unusable: reserved");
        return false;
    }
    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
        ALCHLOG("....Unusable: special-purpose int");
        return false;
    }

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
    //Check if the interrupt priority is acceptable
    if (!(flags & (1 << intr_desc.priority))) {
        ALCHLOG("....Unusable: incompatible priority");
        return false;
    }
    //Check if the edge/level type matches what we want
    if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
        ((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE))) {
        ALCHLOG("....Unusable: incompatible trigger type");
        return false;
    }
#endif

    //Check if the interrupt is reserved at runtime
    if (vd->flags & VECDESC_FL_RESERVED) {
        ALCHLOG("....Unusable: reserved at runtime.");
        return false;
    }

    //Ints can't be both shared and non-shared.
    assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
    //Check if the interrupt is already in use by a non-shared interrupt
    if (vd->flags & VECDESC_FL_NONSHARED) {
        ALCHLOG("....Unusable: already in (non-shared) use.");
        return false;
    }
    //Check shared interrupt flags
    if (vd->flags & VECDESC_FL_SHARED) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
            bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
            //Bail out if the int is shared, but the iram property doesn't match what we want.
            if (desc_in_iram_flag != in_iram_flag) {
                ALCHLOG("....Unusable: shared but iram prop doesn't match");
                return false;
            }
        } else {
            //We need an unshared IRQ; can't use shared ones; bail out if this is shared.
            ALCHLOG("....Unusable: int is shared, we need non-shared.");
            return false;
        }
    } else if (esp_cpu_intr_has_handler(x)) {
        //Check if the interrupt was already allocated via esp_cpu_intr_set_handler
        ALCHLOG("....Unusable: already allocated");
        return false;
    }

    return true;
}

//Locate a free interrupt compatible with the flags given.
//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
//When an interrupt is forced, the ESP_CPU_INTR_DESC_FLAG_SPECIAL marked interrupts are also accepted.
static int get_available_int(int flags, int cpu, int force, int source)
{
    int x;
    int best = -1;
    int bestPriority = 9;
    int bestSharedCt = INT_MAX;

    //Default vector desc, for vectors not in the linked list
    vector_desc_t empty_vect_desc;
    memset(&empty_vect_desc, 0, sizeof(vector_desc_t));

    //Level defaults to any low/med interrupt
    if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
        flags |= ESP_INTR_FLAG_LOWMED;
    }

    ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
    vector_desc_t *vd = find_desc_for_source(source, cpu);
    if (vd) {
        //If an existing vd was found, there is no need to search any further.
        ALCHLOG("get_available_int: existing vd found. intno: %d", vd->intno);
        if (force != -1 && force != vd->intno) {
            ALCHLOG("get_available_int: intr forced but does not match existing. existing intno: %d, force: %d", vd->intno, force);
        } else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            ALCHLOG("get_available_int: existing vd invalid.");
        } else {
            best = vd->intno;
        }
        return best;
    }
    if (force != -1) {
        ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
        //If an interrupt is forced, there is no need to search any further either.
        vd = find_desc_for_int(force, cpu);
        if (vd == NULL) {
            //If an existing vd is not found, just check the default state of the interrupt.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
        if (is_vect_desc_usable(vd, flags, cpu, force)) {
            best = vd->intno;
        } else {
            ALCHLOG("get_available_int: forced vd invalid.");
        }
        return best;
    }

    ALCHLOG("get_available_int: start looking. Current cpu: %d", cpu);
    //No existing vd for this source and no forced interrupt; iterate over the 32 possible interrupts
    for (x = 0; x < 32; x++) {
        //Grab the vector_desc for this vector.
        vd = find_desc_for_int(x, cpu);
        if (vd == NULL) {
            empty_vect_desc.intno = x;
            vd = &empty_vect_desc;
        }

        esp_cpu_intr_desc_t intr_desc;
        esp_cpu_intr_get_desc(cpu, x, &intr_desc);

        ALCHLOG("Int %d reserved %d priority %d %s hasIsr %d",
                x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
                intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL ? "LEVEL" : "EDGE", esp_cpu_intr_has_handler(x));

        if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            continue;
        }

        if (flags & ESP_INTR_FLAG_SHARED) {
            //We're allocating a shared int.

            //See if the int is already in use as a shared interrupt.
            if (vd->flags & VECDESC_FL_SHARED) {
                //We can use this already-marked-as-shared interrupt. Count the already attached ISRs in order to see
                //how useful it is.
                int no = 0;
                shared_vector_desc_t *svdesc = vd->shared_vec_info;
                while (svdesc != NULL) {
                    no++;
                    svdesc = svdesc->next;
                }
                if (no < bestSharedCt || bestPriority > intr_desc.priority) {
                    //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
                    best = x;
                    bestSharedCt = no;
                    bestPriority = intr_desc.priority;
                    ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                } else {
                    ALCHLOG("...worse than int %d", best);
                }
            } else {
                if (best == -1) {
                    //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                    //not marked as shared.
                    //Remember it in case we don't find any other shared interrupt that qualifies.
                    if (bestPriority > intr_desc.priority) {
                        best = x;
                        bestPriority = intr_desc.priority;
                        ALCHLOG("...int %d usable as a new shared int", x);
                    }
                } else {
                    ALCHLOG("...already have a shared int");
                }
            }
        } else {
            //This interrupt seems feasible. Select it if its priority is lower than the best one found so far;
            //keep scanning in case an even lower-priority interrupt turns up.
            if (bestPriority > intr_desc.priority) {
                best = x;
                bestPriority = intr_desc.priority;
            } else {
                ALCHLOG("...worse than int %d", best);
            }
        }
    }
    ALCHLOG("get_available_int: using int %d", best);

    //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best.
    return best;
}

//Common shared isr handler. Chain-call all ISRs.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
    vector_desc_t *vd = (vector_desc_t*)arg;
    shared_vector_desc_t *sh_vec = vd->shared_vec_info;
    portENTER_CRITICAL_ISR(&spinlock);
    while (sh_vec) {
        if (!sh_vec->disabled) {
            if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
                traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF);
                sh_vec->isr(sh_vec->arg);
                //Check whether we will return to the scheduler or to the interrupted task after the ISR
                if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
                    traceISR_EXIT();
                }
            }
        }
        sh_vec = sh_vec->next;
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}

#if CONFIG_APPTRACE_SV_ENABLE
//Common non-shared isr handler wrapper.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
    non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t*)arg;
    portENTER_CRITICAL_ISR(&spinlock);
    traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF);
    //FIXME: can we call the ISR and check os_task_switch_is_pended() after releasing the spinlock?
    //When CONFIG_APPTRACE_SV_ENABLE = 0, ISRs for non-shared IRQs are called without the spinlock held.
    ns_isr_arg->isr(ns_isr_arg->isr_arg);
    //Check whether we will return to the scheduler or to the interrupted task after the ISR
    if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
        traceISR_EXIT();
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
#endif

//We use ESP_EARLY_LOG* here because this can be called before the scheduler is running.
esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler,
                                    void *arg, intr_handle_t *ret_handle)
{
    intr_handle_data_t *ret = NULL;
    int force = -1;
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", esp_cpu_get_core_id());
    //Shared interrupts should be level-triggered.
    if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
        return ESP_ERR_INVALID_ARG;
    }
    //You can't set a handler / arg for a non-C-callable interrupt.
    if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Shared ints should have a handler and a non-processor-local source
    if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Statusreg should have a mask
    if (intrstatusreg && !intrstatusmask) {
        return ESP_ERR_INVALID_ARG;
    }
    //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
    //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000..0x400c2000 region,
    //we need to make sure the interrupt is connected to CPU0.
    //CPU1 does not have access to the RTC fast memory through this region.
    if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
        return ESP_ERR_INVALID_ARG;
    }

    //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
    if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            flags |= ESP_INTR_FLAG_LEVEL1;
        } else {
            flags |= ESP_INTR_FLAG_LOWMED;
        }
    }
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", esp_cpu_get_core_id(), flags);

    //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
    //have to force get_available_int to only look at that one.
    if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER0_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER1_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER2_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW0_INTR_SOURCE) {
        force = ETS_INTERNAL_SW0_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW1_INTR_SOURCE) {
        force = ETS_INTERNAL_SW1_INTR_NO;
    }
    if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) {
        force = ETS_INTERNAL_PROFILING_INTR_NO;
    }

    //Allocate a return handle. If we end up not needing it, we'll free it later on.
    ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (ret == NULL) {
        return ESP_ERR_NO_MEM;
    }

    portENTER_CRITICAL(&spinlock);
    uint32_t cpu = esp_cpu_get_core_id();
    //See if we can find an interrupt that matches the flags.
    int intr = get_available_int(flags, cpu, force, source);
    if (intr == -1) {
        //None found. Bail out.
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NOT_FOUND;
    }
    //Get a vector desc for the int.
    vector_desc_t *vd = get_desc_for_int(intr, cpu);
    if (vd == NULL) {
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NO_MEM;
    }

    //Allocate that int!
    if (flags & ESP_INTR_FLAG_SHARED) {
        //Populate vector entry and add to linked list.
        shared_vector_desc_t *sh_vec = heap_caps_malloc(sizeof(shared_vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (sh_vec == NULL) {
            portEXIT_CRITICAL(&spinlock);
            free(ret);
            return ESP_ERR_NO_MEM;
        }
        memset(sh_vec, 0, sizeof(shared_vector_desc_t));
        sh_vec->statusreg = (uint32_t*)intrstatusreg;
        sh_vec->statusmask = intrstatusmask;
        sh_vec->isr = handler;
        sh_vec->arg = arg;
        sh_vec->next = vd->shared_vec_info;
        sh_vec->source = source;
        sh_vec->disabled = 0;
        vd->shared_vec_info = sh_vec;
        vd->flags |= VECDESC_FL_SHARED;
        //(Re-)set shared isr handler to new value.
        esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)shared_intr_isr, vd);
    } else {
        //Mark as unusable for other interrupt sources. This is ours now!
        vd->flags = VECDESC_FL_NONSHARED;
        if (handler) {
#if CONFIG_APPTRACE_SV_ENABLE
            non_shared_isr_arg_t *ns_isr_arg = heap_caps_malloc(sizeof(non_shared_isr_arg_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
            if (!ns_isr_arg) {
                portEXIT_CRITICAL(&spinlock);
                free(ret);
                return ESP_ERR_NO_MEM;
            }
            ns_isr_arg->isr = handler;
            ns_isr_arg->isr_arg = arg;
            ns_isr_arg->source = source;
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)non_shared_intr_isr, ns_isr_arg);
#else
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
#endif
        }

        if (flags & ESP_INTR_FLAG_EDGE) {
            esp_cpu_intr_edge_ack(intr);
        }

        vd->source = source;
    }
    if (flags & ESP_INTR_FLAG_IRAM) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] &= ~(1 << intr);
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] |= (1 << intr);
    }
    if (source >= 0) {
        esp_rom_route_intr_matrix(cpu, source, intr);
    }

    //Fill return handle data.
    ret->vector_desc = vd;
    ret->shared_vector_desc = vd->shared_vec_info;

    //Enable the interrupt at CPU level.
    ESP_INTR_ENABLE(intr);

    //If the interrupt has to start out disabled, do that now; ints won't be enabled for real until the end
    //of the critical section.
    if (flags & ESP_INTR_FLAG_INTRDISABLED) {
        esp_intr_disable(ret);
    }

#if SOC_CPU_HAS_FLEXIBLE_INTC
    //Extract the level from the interrupt flags passed in
    int level = esp_intr_flags_to_level(flags);
    esp_cpu_intr_set_priority(intr, level);

    if (flags & ESP_INTR_FLAG_EDGE) {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
    } else {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
    }
#endif

#if SOC_INT_PLIC_SUPPORTED
    /* Make sure the interrupt is not delegated to user mode (IDF uses machine mode only) */
    RV_CLEAR_CSR(mideleg, BIT(intr));
#endif

    portEXIT_CRITICAL(&spinlock);

    //Fill the return handle if needed, otherwise free it.
    if (ret_handle != NULL) {
        *ret_handle = ret;
    } else {
        free(ret);
    }

    ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %d)", source, intr, cpu);
    return ESP_OK;
}

esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle)
{
    /*
      As an optimization, we can create a table with the possible interrupt status registers and masks for every single
      source there is. We can then add code here to look up an applicable value and pass that to the
      esp_intr_alloc_intrstatus function.
    */
    return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
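
/*
 Illustrative usage sketch (not part of this file): allocating a level-1, IRAM-safe interrupt for a
 peripheral source and freeing it again. The source macro and handler below are hypothetical
 placeholders; real callers pass one of the ETS_*_INTR_SOURCE values provided by the SoC headers
 (or a negative number for an internal, CPU-local interrupt).

     static void IRAM_ATTR my_isr(void *arg)
     {
         //Clear the peripheral's interrupt status here, then do minimal work.
     }

     void example(void)
     {
         intr_handle_t handle;
         esp_err_t err = esp_intr_alloc(ETS_MY_PERIPH_INTR_SOURCE,   //hypothetical source
                                        ESP_INTR_FLAG_LEVEL1 | ESP_INTR_FLAG_IRAM,
                                        my_isr, NULL, &handle);
         if (err == ESP_OK) {
             //...later, from a task that can run on the allocating core:
             esp_intr_free(handle);
         }
     }
*/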

esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    vector_desc_t *vd = handle->vector_desc;
    if (vd->flags & VECDESC_FL_SHARED) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL(&spinlock);
    uint32_t mask = (1 << vd->intno);
    if (is_in_iram) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] &= ~mask;
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] |= mask;
    }
    portEXIT_CRITICAL(&spinlock);
    return ESP_OK;
}

#if !CONFIG_FREERTOS_UNICORE
static void intr_free_for_other_cpu(void *arg)
{
    (void)intr_free_for_current_cpu((intr_handle_t)arg);
}
#endif /* !CONFIG_FREERTOS_UNICORE */

esp_err_t esp_intr_free(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }

#if !CONFIG_FREERTOS_UNICORE
    //Run the free routine on the core the interrupt is allocated on.

    bool task_can_be_run_on_any_core;
#if CONFIG_FREERTOS_SMP
    UBaseType_t core_affinity = vTaskCoreAffinityGet(NULL);
    task_can_be_run_on_any_core = (__builtin_popcount(core_affinity) > 1);
#else
    UBaseType_t core_affinity = xTaskGetAffinity(NULL);
    task_can_be_run_on_any_core = (core_affinity == tskNO_AFFINITY);
#endif

    if (task_can_be_run_on_any_core || handle->vector_desc->cpu != esp_cpu_get_core_id()) {
        //If the task can run on any core, we cannot rely on the current CPU id staying the same
        //(a task switch may migrate us). It is safer to run intr_free_for_current_cpu() from a task
        //pinned to the target CPU; this is done through an IPC call.
        esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &intr_free_for_other_cpu, (void *)handle);
        return ret == ESP_OK ? ESP_OK : ESP_FAIL;
    }
#endif /* !CONFIG_FREERTOS_UNICORE */

    return intr_free_for_current_cpu(handle);
}

static esp_err_t intr_free_for_current_cpu(intr_handle_t handle)
{
    bool free_shared_vector = false;

    portENTER_CRITICAL(&spinlock);
    esp_intr_disable(handle);
    if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
        //Find and kill the shared int
        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
        shared_vector_desc_t *prevsvd = NULL;
        assert(svd); //there should be something in there for a shared int
        while (svd != NULL) {
            if (svd == handle->shared_vector_desc) {
                //Found it. Now kill it.
                if (prevsvd) {
                    prevsvd->next = svd->next;
                } else {
                    handle->vector_desc->shared_vec_info = svd->next;
                }
                free(svd);
                break;
            }
            prevsvd = svd;
            svd = svd->next;
        }
        //If nothing is left, disable the interrupt.
        if (handle->vector_desc->shared_vec_info == NULL) {
            free_shared_vector = true;
        }
        ESP_EARLY_LOGV(TAG,
                       "esp_intr_free: Deleting shared int: %s. Shared int is %s",
                       svd ? "deleted" : "not found",
                       free_shared_vector ? "empty now." : "still in use");
    }

    if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_APPTRACE_SV_ENABLE
        if (!free_shared_vector) {
            void *isr_arg = esp_cpu_intr_get_handler_arg(handle->vector_desc->intno);
            if (isr_arg) {
                free(isr_arg);
            }
        }
#endif
        //Reset to the default handler:
        esp_cpu_intr_set_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
        //Theoretically, we could free the vector_desc, but it's not clear that is worth the few bytes of
        //memory saved. (It would also mean we could no longer use the same exit path for empty shared ints
        //if we deleted the desc.) For now, just mark it as free.
        handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED | VECDESC_FL_RESERVED | VECDESC_FL_SHARED);
        handle->vector_desc->source = ETS_INTERNAL_UNUSED_INTR_SOURCE;

        //Also clear the non_iram mask bit.
        non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle);
    return ESP_OK;
}

int esp_intr_get_intno(intr_handle_t handle)
{
    return handle->vector_desc->intno;
}

int esp_intr_get_cpu(intr_handle_t handle)
{
    return handle->vector_desc->cpu;
}

/*
 Interrupt disabling strategy:
 If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected
 interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE.
 This allows us, for muxed interrupts, to disable an int from the other core. It also allows disabling shared
 interrupts.
*/

//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled.
#define INT_MUX_DISABLED_INTNO 6

esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled = 0;
        source = handle->shared_vector_desc->source;
    } else {
        source = handle->vector_desc->source;
    }
    if (source >= 0) {
        //Disabled using the int matrix; re-connect to enable
        esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, handle->vector_desc->intno);
    } else {
        //Re-enable using the CPU int enable reg
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
        ESP_INTR_ENABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    bool disabled = true;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled = 1;
        source = handle->shared_vector_desc->source;

        //Only disconnect the source at the matrix if no other shared handler for it is still enabled.
        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
        assert(svd != NULL);
        while (svd) {
            if (svd->source == source && svd->disabled == 0) {
                disabled = false;
                break;
            }
            svd = svd->next;
        }
    } else {
        source = handle->vector_desc->source;
    }

    if (source >= 0) {
        if (disabled) {
            //Disable using the int matrix
            esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
    } else {
        //Disable using per-cpu regs
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu
        }
        ESP_INTR_DISABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

void IRAM_ATTR esp_intr_noniram_disable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t oldint;
    uint32_t cpu = esp_cpu_get_core_id();
    uint32_t non_iram_ints = non_iram_int_mask[cpu];
    if (non_iram_int_disabled_flag[cpu]) {
        abort();
    }
    non_iram_int_disabled_flag[cpu] = true;
    oldint = esp_cpu_intr_get_enabled_mask();
    esp_cpu_intr_disable(non_iram_ints);
    //Disable the RTC interrupts that are not IRAM-safe.
    rtc_isr_noniram_disable(cpu);
    //Save which ints were actually disabled
    non_iram_int_disabled[cpu] = oldint & non_iram_ints;
    portEXIT_CRITICAL_SAFE(&spinlock);
}

void IRAM_ATTR esp_intr_noniram_enable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t cpu = esp_cpu_get_core_id();
    uint32_t non_iram_ints = non_iram_int_disabled[cpu];
    if (!non_iram_int_disabled_flag[cpu]) {
        abort();
    }
    non_iram_int_disabled_flag[cpu] = false;
    esp_cpu_intr_enable(non_iram_ints);
    rtc_isr_noniram_enable(cpu);
    portEXIT_CRITICAL_SAFE(&spinlock);
}
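
/*
 Illustrative usage sketch: esp_intr_noniram_disable()/esp_intr_noniram_enable() must be called in
 matched pairs on the same core (the abort() above enforces this), typically around an operation
 during which flash-cached code is unavailable. The wrapper below is a hypothetical helper, shown
 only to make the pairing explicit.

     static void IRAM_ATTR run_while_cache_is_off(void (*op)(void))
     {
         esp_intr_noniram_disable();   //mask all non-IRAM-safe interrupts on this core
         op();                         //op must execute IRAM-resident code only
         esp_intr_noniram_enable();    //restore exactly the interrupts that were disabled
     }
*/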

//These functions are provided in ROM, but the ROM-based functions use non-multicore-capable
//virtualized interrupt levels. Thus, we disable them in the ld file and provide working
//equivalents here.

void IRAM_ATTR ets_isr_unmask(uint32_t mask)
{
    esp_cpu_intr_enable(mask);
}

void IRAM_ATTR ets_isr_mask(uint32_t mask)
{
    esp_cpu_intr_disable(mask);
}

void esp_intr_enable_source(int inum)
{
    esp_cpu_intr_enable(1 << inum);
}

void esp_intr_disable_source(int inum)
{
    esp_cpu_intr_disable(1 << inum);
}