1 /*
2  * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdint.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <stdbool.h>
11 #include <string.h>
12 #include <esp_types.h>
13 #include <limits.h>
14 #include <assert.h>
15 #include "sdkconfig.h"
16 #include "freertos/FreeRTOS.h"
17 #include "freertos/task.h"
18 #include "esp_err.h"
19 #include "esp_log.h"
20 #include "esp_memory_utils.h"
21 #include "esp_intr_alloc.h"
22 #include "esp_attr.h"
23 #include "esp_cpu.h"
24 #include "esp_private/rtc_ctrl.h"
25 
26 #if !CONFIG_FREERTOS_UNICORE
27 #include "esp_ipc.h"
28 #endif
29 
30 static const char* TAG = "intr_alloc";
31 
32 #define ETS_INTERNAL_TIMER0_INTR_NO 6
33 #define ETS_INTERNAL_TIMER1_INTR_NO 15
34 #define ETS_INTERNAL_TIMER2_INTR_NO 16
35 #define ETS_INTERNAL_SW0_INTR_NO 7
36 #define ETS_INTERNAL_SW1_INTR_NO 29
37 #define ETS_INTERNAL_PROFILING_INTR_NO 11
38 
39 /*
40 Define this to debug the choices made when allocating the interrupt. This leads to much debugging
41 output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
42 being triggered, that is why it is separate from the normal LOG* scheme.
43 */
44 // #define DEBUG_INT_ALLOC_DECISIONS
45 
46 #ifdef DEBUG_INT_ALLOC_DECISIONS
47 # define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__)
48 #else
49 # define ALCHLOG(...) do {} while (0)
50 #endif
51 
52 typedef struct shared_vector_desc_t shared_vector_desc_t;
53 typedef struct vector_desc_t vector_desc_t;
54 
//Descriptor for one handler attached to a shared interrupt vector.
//A shared vector keeps a linked list of these; the common shared ISR walks
//the list and dispatches to every enabled entry whose status bit is set.
struct shared_vector_desc_t {
    int disabled: 1;                //Set (via esp_intr_disable) to skip this handler without unhooking it
    int source: 8;                  //Interrupt mux source this handler serves
    volatile uint32_t *statusreg;   //Optional status register; NULL means "always call isr"
    uint32_t statusmask;            //Mask ANDed with *statusreg to check whether this handler's int fired
    intr_handler_t isr;             //Handler to call
    void *arg;                      //Argument passed to isr
    shared_vector_desc_t *next;     //Next handler sharing the same vector, or NULL
};
64 
65 #define VECDESC_FL_RESERVED     (1<<0)
66 #define VECDESC_FL_INIRAM       (1<<1)
67 #define VECDESC_FL_SHARED       (1<<2)
68 #define VECDESC_FL_NONSHARED    (1<<3)
69 
70 //Pack using bitfields for better memory use
struct vector_desc_t {
    int flags: 16;                          //OR of VECDESC_FL_* defines
    unsigned int cpu: 1;                    //CPU (core) this vector belongs to; 1 bit supports at most 2 cores
    unsigned int intno: 5;                  //CPU interrupt number, 0..31
    int source: 8;                          //Interrupt mux flags, used when not shared
    shared_vector_desc_t *shared_vec_info;  //used when VECDESC_FL_SHARED
    vector_desc_t *next;                    //Next descriptor in the list (sorted by cpu, then intno)
};
79 
//Handle returned to callers of esp_intr_alloc*; identifies the vector and,
//for shared interrupts, the specific shared handler entry of this caller.
struct intr_handle_data_t {
    vector_desc_t *vector_desc;                 //Vector this handle's interrupt lives on
    shared_vector_desc_t *shared_vector_desc;   //This handle's entry in the shared list; NULL for non-shared ints
};
84 
85 typedef struct non_shared_isr_arg_t non_shared_isr_arg_t;
86 
//Argument bundle for non_shared_intr_isr; only used when CONFIG_APPTRACE_SV_ENABLE
//wraps non-shared handlers for trace reporting.
struct non_shared_isr_arg_t {
    intr_handler_t isr;     //User handler
    void *isr_arg;          //User handler argument
    int source;             //Interrupt source, reported to the tracing hooks
};
92 
93 //Linked list of vector descriptions, sorted by cpu.intno value
94 static vector_desc_t *vector_desc_head = NULL;
95 
//This bitmask has a 1 for each interrupt that should be disabled when the flash is disabled.
97 static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];
98 
99 //This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable.
100 static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
101 static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];
102 
103 static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
104 
105 //Inserts an item into vector_desc list so that the list is sorted
106 //with an incrementing cpu.intno value.
static void insert_vector_desc(vector_desc_t *to_insert)
{
    //Walk the sorted list until we hit the first entry that must come after
    //the new one (ordering: cpu first, then intno).
    vector_desc_t *prev = NULL;
    vector_desc_t *cur = vector_desc_head;
    for (; cur != NULL; prev = cur, cur = cur->next) {
        if (cur->cpu > to_insert->cpu) {
            break;
        }
        if (cur->cpu == to_insert->cpu && cur->intno >= to_insert->intno) {
            break;
        }
    }
    //Splice the new entry between prev and cur; a NULL prev means it becomes
    //the new list head.
    to_insert->next = cur;
    if (prev == NULL) {
        vector_desc_head = to_insert;
    } else {
        prev->next = to_insert;
    }
}
126 
127 //Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
find_desc_for_int(int intno,int cpu)128 static vector_desc_t *find_desc_for_int(int intno, int cpu)
129 {
130     vector_desc_t *vd = vector_desc_head;
131     while(vd != NULL) {
132         if (vd->cpu == cpu && vd->intno == intno) {
133             break;
134         }
135         vd = vd->next;
136     }
137     return vd;
138 }
139 
140 //Returns a vector_desc entry for an intno/cpu.
141 //Either returns a preexisting one or allocates a new one and inserts
142 //it into the list. Returns NULL on malloc fail.
get_desc_for_int(int intno,int cpu)143 static vector_desc_t *get_desc_for_int(int intno, int cpu)
144 {
145     vector_desc_t *vd = find_desc_for_int(intno, cpu);
146     if (vd == NULL) {
147         vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
148         if (newvd == NULL) {
149             return NULL;
150         }
151         memset(newvd, 0, sizeof(vector_desc_t));
152         newvd->intno = intno;
153         newvd->cpu = cpu;
154         insert_vector_desc(newvd);
155         return newvd;
156     } else {
157         return vd;
158     }
159 }
160 
//Returns a vector_desc entry for a source; the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs
find_desc_for_source(int source,int cpu)162 static vector_desc_t * find_desc_for_source(int source, int cpu)
163 {
164     vector_desc_t *vd = vector_desc_head;
165     while(vd != NULL) {
166         if (!(vd->flags & VECDESC_FL_SHARED)) {
167             if (vd->source == source && cpu == vd->cpu) {
168                 break;
169             }
170         } else if (vd->cpu == cpu) {
171             // check only shared vds for the correct cpu, otherwise skip
172             bool found = false;
173             shared_vector_desc_t *svd = vd->shared_vec_info;
174             assert(svd != NULL);
175             while(svd) {
176                 if (svd->source == source) {
177                     found = true;
178                     break;
179                 }
180                 svd = svd->next;
181             }
182             if (found) {
183                 break;
184             }
185         }
186         vd = vd->next;
187     }
188     return vd;
189 }
190 
esp_intr_mark_shared(int intno,int cpu,bool is_int_ram)191 esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
192 {
193     if (intno>31) {
194         return ESP_ERR_INVALID_ARG;
195     }
196     if (cpu >= SOC_CPU_CORES_NUM) {
197         return ESP_ERR_INVALID_ARG;
198     }
199 
200     portENTER_CRITICAL(&spinlock);
201     vector_desc_t *vd = get_desc_for_int(intno, cpu);
202     if (vd == NULL) {
203         portEXIT_CRITICAL(&spinlock);
204         return ESP_ERR_NO_MEM;
205     }
206     vd->flags = VECDESC_FL_SHARED;
207     if (is_int_ram) {
208         vd->flags |= VECDESC_FL_INIRAM;
209     }
210     portEXIT_CRITICAL(&spinlock);
211 
212     return ESP_OK;
213 }
214 
esp_intr_reserve(int intno,int cpu)215 esp_err_t esp_intr_reserve(int intno, int cpu)
216 {
217     if (intno > 31) {
218         return ESP_ERR_INVALID_ARG;
219     }
220     if (cpu >= SOC_CPU_CORES_NUM) {
221         return ESP_ERR_INVALID_ARG;
222     }
223 
224     portENTER_CRITICAL(&spinlock);
225     vector_desc_t *vd = get_desc_for_int(intno, cpu);
226     if (vd == NULL) {
227         portEXIT_CRITICAL(&spinlock);
228         return ESP_ERR_NO_MEM;
229     }
230     vd->flags = VECDESC_FL_RESERVED;
231     portEXIT_CRITICAL(&spinlock);
232 
233     return ESP_OK;
234 }
235 
//Decide whether vector descriptor 'vd' can satisfy an allocation request
//with the given ESP_INTR_FLAG_* flags on core 'cpu'. 'force' is -1 normally;
//when a specific interrupt is forced, ints marked "special" by the SoC
//description are also accepted. Returns true if the vector is usable.
static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force)
{
    //Check if interrupt is not reserved by design
    int x = vd->intno;
    esp_cpu_intr_desc_t intr_desc;
    esp_cpu_intr_get_desc(cpu, x, &intr_desc);

    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
        ALCHLOG("....Unusable: reserved");
        return false;
    }
    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
        ALCHLOG("....Unusable: special-purpose int");
        return false;
    }

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
    //On targets with fixed interrupt priorities, the flags carry a bitmask of
    //acceptable levels; bit 'priority' must be set for this int to qualify.
    if (!(flags & (1 << intr_desc.priority))) {
        ALCHLOG("....Unusable: incompatible priority");
        return false;
    }
    //check if edge/level type matches what we want
    if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
        (((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
        ALCHLOG("....Unusable: incompatible trigger type");
        return false;
    }
#endif

    //check if interrupt is reserved at runtime
    if (vd->flags & VECDESC_FL_RESERVED)  {
        ALCHLOG("....Unusable: reserved at runtime.");
        return false;
    }

    //Ints can't be both shared and non-shared.
    assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
    //check if interrupt already is in use by a non-shared interrupt
    if (vd->flags & VECDESC_FL_NONSHARED) {
        ALCHLOG("....Unusable: already in (non-shared) use.");
        return false;
    }
    // check shared interrupt flags
    if (vd->flags & VECDESC_FL_SHARED) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
            bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
            //Bail out if int is shared, but iram property doesn't match what we want.
            if ((vd->flags & VECDESC_FL_SHARED) && (desc_in_iram_flag != in_iram_flag))  {
                ALCHLOG("....Unusable: shared but iram prop doesn't match");
                return false;
            }
        } else {
            //We need an unshared IRQ; can't use shared ones; bail out if this is shared.
            ALCHLOG("...Unusable: int is shared, we need non-shared.");
            return false;
        }
    } else if (esp_cpu_intr_has_handler(x)) {
        //Check if interrupt already is allocated by esp_cpu_intr_set_handler
        ALCHLOG("....Unusable: already allocated");
        return false;
    }

    return true;
}
302 
303 //Locate a free interrupt compatible with the flags given.
304 //The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
305 //When a CPU is forced, the ESP_CPU_INTR_DESC_FLAG_SPECIAL marked interrupts are also accepted.
//Find the best CPU interrupt for the request, in three stages:
//1) if the source already has a vector, reuse it (or fail);
//2) if a specific interrupt is forced, check only that one;
//3) otherwise scan all 32 interrupts, preferring (for shared requests) the
//   vector with the fewest attached ISRs, and in general the lowest priority.
//Returns the interrupt number or -1 if none qualifies.
static int get_available_int(int flags, int cpu, int force, int source)
{
    int x;
    int best=-1;
    int bestPriority=9;          //lower priority wins; 9 is above any real level
    int bestSharedCt=INT_MAX;    //fewest already-attached shared ISRs wins

    //Default vector desc, for vectors not in the linked list
    vector_desc_t empty_vect_desc;
    memset(&empty_vect_desc, 0, sizeof(vector_desc_t));

    //Level defaults to any low/med interrupt
    if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
        flags |= ESP_INTR_FLAG_LOWMED;
    }

    ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
    vector_desc_t *vd = find_desc_for_source(source, cpu);
    if (vd) {
        // if existing vd found, don't need to search any more.
        ALCHLOG("get_avalible_int: existing vd found. intno: %d", vd->intno);
        if ( force != -1 && force != vd->intno ) {
            ALCHLOG("get_avalible_int: intr forced but not matach existing. existing intno: %d, force: %d", vd->intno, force);
        } else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            ALCHLOG("get_avalible_int: existing vd invalid.");
        } else {
            best = vd->intno;
        }
        return best;
    }
    if (force != -1) {
        ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
        //if force assigned, don't need to search any more.
        vd = find_desc_for_int(force, cpu);
        if (vd == NULL) {
            //if existing vd not found, just check the default state for the intr.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
        if (is_vect_desc_usable(vd, flags, cpu, force)) {
            best = vd->intno;
        } else {
            ALCHLOG("get_avalible_int: forced vd invalid.");
        }
        return best;
    }

    ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
    //No allocated handlers as well as forced intr, iterate over the 32 possible interrupts
    for (x = 0; x < 32; x++) {
        //Grab the vector_desc for this vector.
        vd = find_desc_for_int(x, cpu);
        if (vd == NULL) {
            //No descriptor yet: evaluate the interrupt in its default (empty) state.
            empty_vect_desc.intno = x;
            vd = &empty_vect_desc;
        }

        esp_cpu_intr_desc_t intr_desc;
        esp_cpu_intr_get_desc(cpu, x, &intr_desc);

        ALCHLOG("Int %d reserved %d priority %d %s hasIsr %d",
            x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
            intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL? "LEVEL" : "EDGE", esp_cpu_intr_has_handler(x));

        if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            continue;
        }

        if (flags & ESP_INTR_FLAG_SHARED) {
            //We're allocating a shared int.

            //See if int already is used as a shared interrupt.
            if (vd->flags & VECDESC_FL_SHARED) {
                //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
                //how useful it is.
                int no = 0;
                shared_vector_desc_t *svdesc = vd->shared_vec_info;
                while (svdesc != NULL) {
                    no++;
                    svdesc = svdesc->next;
                }
                if (no<bestSharedCt || bestPriority > intr_desc.priority) {
                    //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
                    best = x;
                    bestSharedCt = no;
                    bestPriority = intr_desc.priority;
                    ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                } else {
                    ALCHLOG("...worse than int %d", best);
                }
            } else {
                if (best == -1) {
                    //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                    //not marked as shared.
                    //Remember it in case we don't find any other shared interrupt that qualifies.
                    if (bestPriority > intr_desc.priority) {
                        best = x;
                        bestPriority = intr_desc.priority;
                        ALCHLOG("...int %d usable as a new shared int", x);
                    }
                } else {
                    ALCHLOG("...already have a shared int");
                }
            }
        } else {
            //Seems this interrupt is feasible. Select it and break out of the loop; no need to search further.
            if (bestPriority > intr_desc.priority) {
                best = x;
                bestPriority = intr_desc.priority;
            } else {
                ALCHLOG("...worse than int %d", best);
            }
        }
    }
    ALCHLOG("get_available_int: using int %d", best);

    //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best.
    return best;
}
425 
426 //Common shared isr handler. Chain-call all ISRs.
//Common ISR for shared vectors: walks the shared handler list and calls every
//enabled handler whose status register (if any) indicates a pending interrupt.
//'arg' is the vector_desc_t of the vector that fired. Runs with the allocator
//spinlock held so the list cannot change underneath us.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
    vector_desc_t *vd = (vector_desc_t*)arg;
    shared_vector_desc_t *sh_vec = vd->shared_vec_info;
    portENTER_CRITICAL_ISR(&spinlock);
    while(sh_vec) {
        if (!sh_vec->disabled) {
            //NULL statusreg means "always call"; otherwise only dispatch when
            //the handler's status bit(s) are set.
            if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
                traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF);
                sh_vec->isr(sh_vec->arg);
                // check if we will return to scheduler or to interrupted task after ISR
                if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
                    traceISR_EXIT();
                }
            }
        }
        sh_vec = sh_vec->next;
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
447 
448 #if CONFIG_APPTRACE_SV_ENABLE
449 //Common non-shared isr handler wrapper.
//Wrapper ISR for non-shared interrupts, used only when SystemView tracing is
//enabled: emits trace enter/exit events around the user handler.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
    non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t*)arg;
    portENTER_CRITICAL_ISR(&spinlock);
    traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF);
    // FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
    // when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
    ns_isr_arg->isr(ns_isr_arg->isr_arg);
    // check if we will return to scheduler or to interrupted task after ISR
    if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
        traceISR_EXIT();
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
464 #endif
465 
466 //We use ESP_EARLY_LOG* here because this can be called before the scheduler is running.
//Allocate an interrupt for 'source' on the calling core.
// source         - interrupt mux source, or a negative internal source
// flags          - ESP_INTR_FLAG_* options (level mask, SHARED, IRAM, EDGE, ...)
// intrstatusreg  - for shared ints: address of a status register (0 = none)
// intrstatusmask - mask within that register identifying this source
// handler/arg    - C handler to call (may be NULL for high-level ints)
// ret_handle     - out: handle for enable/disable/free (may be NULL)
//Returns ESP_OK, ESP_ERR_INVALID_ARG on bad argument combinations,
//ESP_ERR_NO_MEM on allocation failure, or ESP_ERR_NOT_FOUND when no
//compatible free interrupt exists.
esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler,
                                        void *arg, intr_handle_t *ret_handle)
{
    intr_handle_data_t *ret=NULL;
    int force = -1;
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", esp_cpu_get_core_id());
    //Shared interrupts should be level-triggered.
    if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
        return ESP_ERR_INVALID_ARG;
    }
    //You can't set an handler / arg for a non-C-callable interrupt.
    if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Shared ints should have handler and non-processor-local source
    if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source<0)) {
        return ESP_ERR_INVALID_ARG;
    }
    //Statusreg should have a mask
    if (intrstatusreg && !intrstatusmask) {
        return ESP_ERR_INVALID_ARG;
    }
    //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
    //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
    //we need to make sure the interrupt is connected to the CPU0.
    //CPU1 does not have access to the RTC fast memory through this region.
    if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
        return ESP_ERR_INVALID_ARG;
    }

    //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
    if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
        if (flags & ESP_INTR_FLAG_SHARED) {
            flags |= ESP_INTR_FLAG_LEVEL1;
        } else {
            flags |= ESP_INTR_FLAG_LOWMED;
        }
    }
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", esp_cpu_get_core_id(), flags);

    //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
    //have to force get_free_int to only look at that.
    if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER0_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER1_INTR_NO;
    }
    if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) {
        force = ETS_INTERNAL_TIMER2_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW0_INTR_SOURCE) {
        force = ETS_INTERNAL_SW0_INTR_NO;
    }
    if (source == ETS_INTERNAL_SW1_INTR_SOURCE) {
        force = ETS_INTERNAL_SW1_INTR_NO;
    }
    if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) {
        force = ETS_INTERNAL_PROFILING_INTR_NO;
    }

    //Allocate a return handle. If we end up not needing it, we'll free it later on.
    //Allocated outside the critical section because malloc may not be ISR/critical-safe.
    ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (ret == NULL) {
        return ESP_ERR_NO_MEM;
    }

    portENTER_CRITICAL(&spinlock);
    uint32_t cpu = esp_cpu_get_core_id();
    //See if we can find an interrupt that matches the flags.
    int intr = get_available_int(flags, cpu, force, source);
    if (intr == -1) {
        //None found. Bail out.
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NOT_FOUND;
    }
    //Get an int vector desc for int.
    vector_desc_t *vd = get_desc_for_int(intr, cpu);
    if (vd == NULL) {
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NO_MEM;
    }

    //Allocate that int!
    if (flags & ESP_INTR_FLAG_SHARED) {
        //Populate vector entry and add to linked list.
        shared_vector_desc_t *sh_vec=malloc(sizeof(shared_vector_desc_t));
        if (sh_vec == NULL) {
            portEXIT_CRITICAL(&spinlock);
            free(ret);
            return ESP_ERR_NO_MEM;
        }
        memset(sh_vec, 0, sizeof(shared_vector_desc_t));
        sh_vec->statusreg = (uint32_t*)intrstatusreg;
        sh_vec->statusmask = intrstatusmask;
        sh_vec->isr = handler;
        sh_vec->arg = arg;
        //New entry is prepended to the vector's shared handler list.
        sh_vec->next = vd->shared_vec_info;
        sh_vec->source = source;
        sh_vec->disabled = 0;
        vd->shared_vec_info = sh_vec;
        vd->flags |= VECDESC_FL_SHARED;
        //(Re-)set shared isr handler to new value.
        esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)shared_intr_isr, vd);
    } else {
        //Mark as unusable for other interrupt sources. This is ours now!
        vd->flags = VECDESC_FL_NONSHARED;
        if (handler) {
#if CONFIG_APPTRACE_SV_ENABLE
            //With SystemView tracing, wrap the handler so enter/exit events are emitted.
            non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
            if (!ns_isr_arg) {
                portEXIT_CRITICAL(&spinlock);
                free(ret);
                return ESP_ERR_NO_MEM;
            }
            ns_isr_arg->isr = handler;
            ns_isr_arg->isr_arg = arg;
            ns_isr_arg->source = source;
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)non_shared_intr_isr, ns_isr_arg);
#else
            esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
#endif
        }

        if (flags & ESP_INTR_FLAG_EDGE) {
            esp_cpu_intr_edge_ack(intr);
        }

        vd->source = source;
    }
    //Track whether this int must stay enabled when flash cache is off.
    if (flags & ESP_INTR_FLAG_IRAM) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] &= ~(1<<intr);
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu] |= (1<<intr);
    }
    //Route muxed (non-internal) sources through the interrupt matrix.
    if (source>=0) {
        esp_rom_route_intr_matrix(cpu, source, intr);
    }

    //Fill return handle data.
    ret->vector_desc = vd;
    ret->shared_vector_desc = vd->shared_vec_info;

    //Enable int at CPU-level;
    ESP_INTR_ENABLE(intr);

    //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
    //of the critical section.
    if (flags & ESP_INTR_FLAG_INTRDISABLED) {
        esp_intr_disable(ret);
    }

#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
    //Extract the level from the interrupt passed flags
    int level = esp_intr_flags_to_level(flags);
    esp_cpu_intr_set_priority(intr, level);

    if (flags & ESP_INTR_FLAG_EDGE) {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
    } else {
        esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
    }
#endif

    portEXIT_CRITICAL(&spinlock);

    //Fill return handle if needed, otherwise free handle.
    if (ret_handle != NULL) {
        *ret_handle = ret;
    } else {
        free(ret);
    }

    ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %d)", source, intr, cpu);
    return ESP_OK;
}
647 
esp_intr_alloc(int source,int flags,intr_handler_t handler,void * arg,intr_handle_t * ret_handle)648 esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle)
649 {
650     /*
651       As an optimization, we can create a table with the possible interrupt status registers and masks for every single
652       source there is. We can then add code here to look up an applicable value and pass that to the
653       esp_intr_alloc_intrstatus function.
654     */
655     return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
656 }
657 
esp_intr_set_in_iram(intr_handle_t handle,bool is_in_iram)658 esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
659 {
660     if (!handle) {
661         return ESP_ERR_INVALID_ARG;
662     }
663     vector_desc_t *vd = handle->vector_desc;
664     if (vd->flags & VECDESC_FL_SHARED) {
665       return ESP_ERR_INVALID_ARG;
666     }
667     portENTER_CRITICAL(&spinlock);
668     uint32_t mask = (1 << vd->intno);
669     if (is_in_iram) {
670         vd->flags |= VECDESC_FL_INIRAM;
671         non_iram_int_mask[vd->cpu] &= ~mask;
672     } else {
673         vd->flags &= ~VECDESC_FL_INIRAM;
674         non_iram_int_mask[vd->cpu] |= mask;
675     }
676     portEXIT_CRITICAL(&spinlock);
677     return ESP_OK;
678 }
679 
680 #if !CONFIG_FREERTOS_UNICORE
esp_intr_free_cb(void * arg)681 static void esp_intr_free_cb(void *arg)
682 {
683     (void)esp_intr_free((intr_handle_t)arg);
684 }
685 #endif /* !CONFIG_FREERTOS_UNICORE */
686 
esp_intr_free(intr_handle_t handle)687 esp_err_t esp_intr_free(intr_handle_t handle)
688 {
689     bool free_shared_vector=false;
690     if (!handle) {
691         return ESP_ERR_INVALID_ARG;
692     }
693 
694 #if !CONFIG_FREERTOS_UNICORE
695     //Assign this routine to the core where this interrupt is allocated on.
696     if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
697         esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
698         return ret == ESP_OK ? ESP_OK : ESP_FAIL;
699     }
700 #endif /* !CONFIG_FREERTOS_UNICORE */
701 
702     portENTER_CRITICAL(&spinlock);
703     esp_intr_disable(handle);
704     if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
705         //Find and kill the shared int
706         shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
707         shared_vector_desc_t *prevsvd = NULL;
708         assert(svd); //should be something in there for a shared int
709         while (svd != NULL) {
710             if (svd == handle->shared_vector_desc) {
711                 //Found it. Now kill it.
712                 if (prevsvd) {
713                     prevsvd->next = svd->next;
714                 } else {
715                     handle->vector_desc->shared_vec_info = svd->next;
716                 }
717                 free(svd);
718                 break;
719             }
720             prevsvd = svd;
721             svd = svd->next;
722         }
723         //If nothing left, disable interrupt.
724         if (handle->vector_desc->shared_vec_info == NULL) {
725             free_shared_vector = true;
726         }
727         ESP_EARLY_LOGV(TAG,
728                        "esp_intr_free: Deleting shared int: %s. Shared int is %s",
729                        svd ? "not found or last one" : "deleted",
730                        free_shared_vector ? "empty now." : "still in use");
731     }
732 
733     if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
734         ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
735 #if CONFIG_APPTRACE_SV_ENABLE
736         if (!free_shared_vector) {
737             void *isr_arg = esp_cpu_intr_get_handler_arg(handle->vector_desc->intno);
738             if (isr_arg) {
739                 free(isr_arg);
740             }
741         }
742 #endif
743         //Reset to normal handler:
744         esp_cpu_intr_set_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
745         //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
746         //we save.(We can also not use the same exit path for empty shared ints anymore if we delete
747         //the desc.) For now, just mark it as free.
748         handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
749         handle->vector_desc->source = ETS_INTERNAL_UNUSED_INTR_SOURCE;
750 
751         //Also kill non_iram mask bit.
752         non_iram_int_mask[handle->vector_desc->cpu] &= ~(1<<(handle->vector_desc->intno));
753     }
754     portEXIT_CRITICAL(&spinlock);
755     free(handle);
756     return ESP_OK;
757 }
758 
//Return the CPU interrupt number behind this handle.
int esp_intr_get_intno(intr_handle_t handle)
{
    const vector_desc_t *vd = handle->vector_desc;
    return vd->intno;
}
763 
//Return the core the interrupt behind this handle is allocated on.
int esp_intr_get_cpu(intr_handle_t handle)
{
    const vector_desc_t *vd = handle->vector_desc;
    return vd->cpu;
}
768 
769 /*
770  Interrupt disabling strategy:
771  If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected
772  interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE.
773  This allows us to, for the muxed CPUs, disable an int from the other core. It also allows disabling shared
774  interrupts.
775  */
776 
//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled.
778 #define INT_MUX_DISABLED_INTNO 6
779 
//Re-enable the interrupt behind 'handle'. Muxed sources (source >= 0) are
//re-enabled by reconnecting them in the interrupt matrix, which works from
//any core; internal per-cpu sources can only be enabled from the owning core.
esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    if (handle->shared_vector_desc) {
        //Shared int: clear this handler's disabled flag as well.
        handle->shared_vector_desc->disabled = 0;
        source=handle->shared_vector_desc->source;
    } else {
        source=handle->vector_desc->source;
    }
    if (source >= 0) {
        //Disabled using int matrix; re-connect to enable
        esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, handle->vector_desc->intno);
    } else {
        //Re-enable using cpu int ena reg
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
        ESP_INTR_ENABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}
807 
//Disable the interrupt behind 'handle'. For a shared interrupt, only this
//handle's handler is marked disabled; the source is unrouted in the matrix
//only when no other handler for the same source remains enabled.
esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
    if (!handle) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    bool disabled = 1;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled = 1;
        source=handle->shared_vector_desc->source;

        //Scan all sharers of this vector: if any other handler for the same
        //source is still enabled, we must not cut the source off in the mux.
        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
        assert(svd != NULL);
        while(svd) {
            if (svd->source == source && svd->disabled == 0) {
                disabled = 0;
                break;
            }
            svd = svd->next;
        }
    } else {
        source=handle->vector_desc->source;
    }

    if (source >= 0) {
        if (disabled) {
            //Disable using int matrix
            esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
    } else {
        //Disable using per-cpu regs
        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
        ESP_INTR_DISABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}
849 
//Disable all non-IRAM-safe interrupts on the current core, e.g. before the
//flash cache is turned off. Must be balanced by esp_intr_noniram_enable;
//nesting is not allowed (aborts if already disabled on this core).
void IRAM_ATTR esp_intr_noniram_disable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t oldint;
    uint32_t cpu = esp_cpu_get_core_id();
    uint32_t non_iram_ints = non_iram_int_mask[cpu];
    if (non_iram_int_disabled_flag[cpu]) {
        abort();
    }
    non_iram_int_disabled_flag[cpu] = true;
    oldint = esp_cpu_intr_get_enabled_mask();
    esp_cpu_intr_disable(non_iram_ints);
    // Disable the RTC bit which don't want to be put in IRAM.
    rtc_isr_noniram_disable(cpu);
    // Save disabled ints
    //Only remember ints that were actually enabled, so enable() restores
    //exactly the previous state.
    non_iram_int_disabled[cpu] = oldint & non_iram_ints;
    portEXIT_CRITICAL_SAFE(&spinlock);
}
868 
esp_intr_noniram_enable(void)869 void IRAM_ATTR esp_intr_noniram_enable(void)
870 {
871     portENTER_CRITICAL_SAFE(&spinlock);
872     uint32_t cpu = esp_cpu_get_core_id();
873     int non_iram_ints = non_iram_int_disabled[cpu];
874     if (!non_iram_int_disabled_flag[cpu]) {
875         abort();
876     }
877     non_iram_int_disabled_flag[cpu] = false;
878     esp_cpu_intr_enable(non_iram_ints);
879     rtc_isr_noniram_enable(cpu);
880     portEXIT_CRITICAL_SAFE(&spinlock);
881 }
882 
883 //These functions are provided in ROM, but the ROM-based functions use non-multicore-capable
884 //virtualized interrupt levels. Thus, we disable them in the ld file and provide working
885 //equivalents here.
886 
887 
ets_isr_unmask(uint32_t mask)888 void IRAM_ATTR ets_isr_unmask(uint32_t mask) {
889     esp_cpu_intr_enable(mask);
890 }
891 
ets_isr_mask(uint32_t mask)892 void IRAM_ATTR ets_isr_mask(uint32_t mask) {
893     esp_cpu_intr_disable(mask);
894 }
895 
//Enable the CPU interrupt with number 'inum' (0..31).
void esp_intr_enable_source(int inum)
{
    //1U: for inum == 31, (1 << 31) on a signed int is undefined behavior.
    esp_cpu_intr_enable(1U << inum);
}
900 
//Disable the CPU interrupt with number 'inum' (0..31).
void esp_intr_disable_source(int inum)
{
    //1U: for inum == 31, (1 << 31) on a signed int is undefined behavior.
    esp_cpu_intr_disable(1U << inum);
}
905