/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <esp_types.h>
#include <limits.h>
#include <assert.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "esp_attr.h"
#include "hal/cpu_hal.h"
#include "hal/interrupt_controller_hal.h"

#if !CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
#endif

static const char* TAG = "intr_alloc";

#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11

/*
Define this to debug the choices made when allocating an interrupt. It produces a lot of debug
output inside a critical section, which can have odd side effects such as triggering the interrupt
watchdog; that is why it uses its own macro instead of the normal LOG* scheme.
*/
// #define DEBUG_INT_ALLOC_DECISIONS

#ifdef DEBUG_INT_ALLOC_DECISIONS
# define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__)
#else
# define ALCHLOG(...) do {} while (0)
#endif

typedef struct shared_vector_desc_t shared_vector_desc_t;
typedef struct vector_desc_t vector_desc_t;

struct shared_vector_desc_t {
    int disabled: 1;
    int source: 8;
    volatile uint32_t *statusreg;
    uint32_t statusmask;
    intr_handler_t isr;
    void *arg;
    shared_vector_desc_t *next;
};

#define VECDESC_FL_RESERVED     (1<<0)
#define VECDESC_FL_INIRAM       (1<<1)
#define VECDESC_FL_SHARED       (1<<2)
#define VECDESC_FL_NONSHARED    (1<<3)

//Pack using bitfields for better memory use
struct vector_desc_t {
    int flags: 16;                          //OR of VECDESC_FL_* defines
    unsigned int cpu: 1;
    unsigned int intno: 5;
    int source: 8;                          //Interrupt mux source, used when not shared
    shared_vector_desc_t *shared_vec_info;  //used when VECDESC_FL_SHARED
    vector_desc_t *next;
};

struct intr_handle_data_t {
    vector_desc_t *vector_desc;
    shared_vector_desc_t *shared_vector_desc;
};

typedef struct non_shared_isr_arg_t non_shared_isr_arg_t;

struct non_shared_isr_arg_t {
    intr_handler_t isr;
    void *isr_arg;
    int source;
};

//Linked list of vector descriptions, sorted by cpu.intno value
static vector_desc_t *vector_desc_head = NULL;

//This bitmask has a 1 for every interrupt that should be disabled when the flash is disabled.
static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];

//This bitmask has a 1 for every interrupt that was disabled using esp_intr_noniram_disable.
static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
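//Per-CPU flag used to check that esp_intr_noniram_disable/esp_intr_noniram_enable are called in balanced pairs.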
static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];

static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;

//Inserts an item into vector_desc list so that the list is sorted
//with an incrementing cpu.intno value.
static void insert_vector_desc(vector_desc_t *to_insert)
{
    vector_desc_t *vd=vector_desc_head;
    vector_desc_t *prev=NULL;
    while(vd!=NULL) {
        if (vd->cpu > to_insert->cpu) break;
        if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
        prev=vd;
        vd=vd->next;
    }
    if ((vector_desc_head==NULL) || (prev==NULL)) {
        //First item
        to_insert->next = vd;
        vector_desc_head=to_insert;
    } else {
        prev->next=to_insert;
        to_insert->next=vd;
    }
}

//Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
static vector_desc_t *find_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd=vector_desc_head;
    while(vd!=NULL) {
        if (vd->cpu==cpu && vd->intno==intno) break;
        vd=vd->next;
    }
    return vd;
}

//Returns a vector_desc entry for an intno/cpu.
//Either returns a preexisting one or allocates a new one and inserts
//it into the list. Returns NULL on malloc fail.
static vector_desc_t *get_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd=find_desc_for_int(intno, cpu);
    if (vd==NULL) {
        vector_desc_t *newvd=heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
        if (newvd==NULL) return NULL;
        memset(newvd, 0, sizeof(vector_desc_t));
        newvd->intno=intno;
        newvd->cpu=cpu;
        insert_vector_desc(newvd);
        return newvd;
    } else {
        return vd;
    }
}

//Returns a vector_desc entry for a source; the cpu parameter is used to tell apart GPIO_INT and GPIO_NMI sources on different CPUs.
static vector_desc_t * find_desc_for_source(int source, int cpu)
{
    vector_desc_t *vd=vector_desc_head;
    while(vd!=NULL) {
        if ( !(vd->flags & VECDESC_FL_SHARED) ) {
            if ( vd->source == source && cpu == vd->cpu ) break;
        } else if ( vd->cpu == cpu ) {
            // check only shared vds for the correct cpu, otherwise skip
            bool found = false;
            shared_vector_desc_t *svd = vd->shared_vec_info;
            assert(svd != NULL );
            while(svd) {
                if ( svd->source == source ) {
                    found = true;
                    break;
                }
                svd = svd->next;
            }
            if ( found ) break;
        }
        vd=vd->next;
    }
    return vd;
}

esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
    if (intno>31) return ESP_ERR_INVALID_ARG;
    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd=get_desc_for_int(intno, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags=VECDESC_FL_SHARED;
    if (is_int_ram) vd->flags|=VECDESC_FL_INIRAM;
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}

esp_err_t esp_intr_reserve(int intno, int cpu)
{
    if (intno>31) return ESP_ERR_INVALID_ARG;
    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd=get_desc_for_int(intno, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags=VECDESC_FL_RESERVED;
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}

static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force)
{
    //Check if interrupt is not reserved by design
    int x = vd->intno;
    if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_RESVD) {
        ALCHLOG("....Unusable: reserved");
        return false;
    }
    if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_SPECIAL && force==-1) {
        ALCHLOG("....Unusable: special-purpose int");
        return false;
    }

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
    //Check if the interrupt level is acceptable
    if (!(flags&(1<<interrupt_controller_hal_get_level(x)))) {
        ALCHLOG("....Unusable: incompatible level");
        return false;
    }
    //check if edge/level type matches what we want
    if (((flags&ESP_INTR_FLAG_EDGE) && (interrupt_controller_hal_get_type(x)==INTTP_LEVEL)) ||
        (((!(flags&ESP_INTR_FLAG_EDGE)) && (interrupt_controller_hal_get_type(x)==INTTP_EDGE)))) {
        ALCHLOG("....Unusable: incompatible trigger type");
        return false;
    }
#endif

    //check if interrupt is reserved at runtime
    if (vd->flags&VECDESC_FL_RESERVED) {
        ALCHLOG("....Unusable: reserved at runtime.");
        return false;
    }

    //Ints can't be both shared and non-shared.
    assert(!((vd->flags&VECDESC_FL_SHARED)&&(vd->flags&VECDESC_FL_NONSHARED)));
    //check if interrupt already is in use by a non-shared interrupt
    if (vd->flags&VECDESC_FL_NONSHARED) {
        ALCHLOG("....Unusable: already in (non-shared) use.");
        return false;
    }
    // check shared interrupt flags
    if (vd->flags&VECDESC_FL_SHARED ) {
        if (flags&ESP_INTR_FLAG_SHARED) {
            bool in_iram_flag=((flags&ESP_INTR_FLAG_IRAM)!=0);
            bool desc_in_iram_flag=((vd->flags&VECDESC_FL_INIRAM)!=0);
            //Bail out if int is shared, but iram property doesn't match what we want.
            if ((vd->flags&VECDESC_FL_SHARED) && (desc_in_iram_flag!=in_iram_flag)) {
                ALCHLOG("....Unusable: shared but iram prop doesn't match");
                return false;
            }
        } else {
            //We need an unshared IRQ; can't use shared ones; bail out if this is shared.
            ALCHLOG("...Unusable: int is shared, we need non-shared.");
            return false;
        }
    } else if (interrupt_controller_hal_has_handler(x, cpu)) {
        //Check if interrupt already is allocated by interrupt_controller_hal_set_int_handler
        ALCHLOG("....Unusable: already allocated");
        return false;
    }

    return true;
}

//Locate a free interrupt that is compatible with the given flags.
//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
//When an interrupt is forced, interrupts marked INTDESC_SPECIAL are also accepted.
static int get_available_int(int flags, int cpu, int force, int source)
{
    int x;
    int best=-1;
    int bestLevel=9;
    int bestSharedCt=INT_MAX;

    //Default vector desc, for vectors not in the linked list
    vector_desc_t empty_vect_desc;
    memset(&empty_vect_desc, 0, sizeof(vector_desc_t));

    //Level defaults to any low/med interrupt
    if (!(flags&ESP_INTR_FLAG_LEVELMASK)) flags|=ESP_INTR_FLAG_LOWMED;

    ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
    vector_desc_t *vd = find_desc_for_source(source, cpu);
    if ( vd ) {
        // if existing vd found, don't need to search any more.
        ALCHLOG("get_available_int: existing vd found. intno: %d", vd->intno);
        if ( force != -1 && force != vd->intno ) {
            ALCHLOG("get_available_int: intr forced but does not match existing. existing intno: %d, force: %d", vd->intno, force);
        } else if ( !is_vect_desc_usable(vd, flags, cpu, force) ) {
            ALCHLOG("get_available_int: existing vd invalid.");
        } else {
            best = vd->intno;
        }
        return best;
    }
    if (force!=-1) {
        ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
        //if force assigned, don't need to search any more.
        vd = find_desc_for_int(force, cpu);
        if (vd == NULL ) {
            //if existing vd not found, just check the default state for the intr.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
        if ( is_vect_desc_usable(vd, flags, cpu, force) ) {
            best = vd->intno;
        } else {
            ALCHLOG("get_available_int: forced vd invalid.");
        }
        return best;
    }

329 ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
330 //No allocated handlers as well as forced intr, iterate over the 32 possible interrupts
    for (x=0; x<32; x++) {
        //Grab the vector_desc for this vector.
        vd=find_desc_for_int(x, cpu);
        if (vd==NULL) {
            empty_vect_desc.intno = x;
            vd=&empty_vect_desc;
        }

        ALCHLOG("Int %d reserved %d level %d %s hasIsr %d",
                x, interrupt_controller_hal_get_cpu_desc_flags(x,cpu)==INTDESC_RESVD, interrupt_controller_hal_get_level(x),
                interrupt_controller_hal_get_type(x)==INTTP_LEVEL?"LEVEL":"EDGE", interrupt_controller_hal_has_handler(x, cpu));

        if ( !is_vect_desc_usable(vd, flags, cpu, force) ) continue;

        if (flags&ESP_INTR_FLAG_SHARED) {
            //We're allocating a shared int.

            //See if int already is used as a shared interrupt.
            if (vd->flags&VECDESC_FL_SHARED) {
                //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
                //how useful it is.
                int no=0;
                shared_vector_desc_t *svdesc=vd->shared_vec_info;
                while (svdesc!=NULL) {
                    no++;
                    svdesc=svdesc->next;
                }
                if (no<bestSharedCt || bestLevel>interrupt_controller_hal_get_level(x)) {
                    //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
                    best=x;
                    bestSharedCt=no;
                    bestLevel=interrupt_controller_hal_get_level(x);
                    ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                } else {
                    ALCHLOG("...worse than int %d", best);
                }
            } else {
                if (best==-1) {
                    //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                    //not marked as shared.
                    //Remember it in case we don't find any other shared interrupt that qualifies.
                    if (bestLevel>interrupt_controller_hal_get_level(x)) {
                        best=x;
                        bestLevel=interrupt_controller_hal_get_level(x);
                        ALCHLOG("...int %d usable as a new shared int", x);
                    }
                } else {
                    ALCHLOG("...already have a shared int");
                }
            }
        } else {
            //Seems this interrupt is feasible. Select it if it is better (lower level) than the best one found so far.
            if (bestLevel>interrupt_controller_hal_get_level(x)) {
                best=x;
                bestLevel=interrupt_controller_hal_get_level(x);
            } else {
                ALCHLOG("...worse than int %d", best);
            }
        }
    }
    ALCHLOG("get_available_int: using int %d", best);

    //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best.
    return best;
}

//Common shared isr handler. Chain-call all ISRs.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
    vector_desc_t *vd=(vector_desc_t*)arg;
    shared_vector_desc_t *sh_vec=vd->shared_vec_info;
    portENTER_CRITICAL_ISR(&spinlock);
    while(sh_vec) {
        if (!sh_vec->disabled) {
            if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
                traceISR_ENTER(sh_vec->source+ETS_INTERNAL_INTR_SOURCE_OFF);
                sh_vec->isr(sh_vec->arg);
                // check if we will return to scheduler or to interrupted task after ISR
                if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
                    traceISR_EXIT();
                }
            }
        }
        sh_vec=sh_vec->next;
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}

#if CONFIG_APPTRACE_SV_ENABLE
//Common non-shared isr handler wrapper.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
    non_shared_isr_arg_t *ns_isr_arg=(non_shared_isr_arg_t*)arg;
    portENTER_CRITICAL_ISR(&spinlock);
    traceISR_ENTER(ns_isr_arg->source+ETS_INTERNAL_INTR_SOURCE_OFF);
    // FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
    // when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
    ns_isr_arg->isr(ns_isr_arg->isr_arg);
    // check if we will return to scheduler or to interrupted task after ISR
    if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
        traceISR_EXIT();
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
#endif

//We use ESP_EARLY_LOG* here because this can be called before the scheduler is running.
esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler,
                                    void *arg, intr_handle_t *ret_handle)
{
    intr_handle_data_t *ret=NULL;
    int force=-1;
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", cpu_hal_get_core_id());
    //Shared interrupts should be level-triggered.
    if ((flags&ESP_INTR_FLAG_SHARED) && (flags&ESP_INTR_FLAG_EDGE)) return ESP_ERR_INVALID_ARG;
    //You can't set a handler / arg for a non-C-callable interrupt.
    if ((flags&ESP_INTR_FLAG_HIGH) && (handler)) return ESP_ERR_INVALID_ARG;
    //Shared ints should have a handler and a non-processor-local source
    if ((flags&ESP_INTR_FLAG_SHARED) && (!handler || source<0)) return ESP_ERR_INVALID_ARG;
    //Statusreg should have a mask
    if (intrstatusreg && !intrstatusmask) return ESP_ERR_INVALID_ARG;
    //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
    //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000 - 0x400c2000 region,
    //we need to make sure the interrupt is connected to the CPU0.
    //CPU1 does not have access to the RTC fast memory through this region.
    if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
        return ESP_ERR_INVALID_ARG;
    }

    //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
    if ((flags&ESP_INTR_FLAG_LEVELMASK)==0) {
        if (flags&ESP_INTR_FLAG_SHARED) {
            flags|=ESP_INTR_FLAG_LEVEL1;
        } else {
            flags|=ESP_INTR_FLAG_LOWMED;
        }
    }
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", cpu_hal_get_core_id(), flags);

    //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
    //have to force get_available_int to only look at that.
    if (source==ETS_INTERNAL_TIMER0_INTR_SOURCE) force=ETS_INTERNAL_TIMER0_INTR_NO;
    if (source==ETS_INTERNAL_TIMER1_INTR_SOURCE) force=ETS_INTERNAL_TIMER1_INTR_NO;
    if (source==ETS_INTERNAL_TIMER2_INTR_SOURCE) force=ETS_INTERNAL_TIMER2_INTR_NO;
    if (source==ETS_INTERNAL_SW0_INTR_SOURCE) force=ETS_INTERNAL_SW0_INTR_NO;
    if (source==ETS_INTERNAL_SW1_INTR_SOURCE) force=ETS_INTERNAL_SW1_INTR_NO;
    if (source==ETS_INTERNAL_PROFILING_INTR_SOURCE) force=ETS_INTERNAL_PROFILING_INTR_NO;

    //Allocate a return handle. If we end up not needing it, we'll free it later on.
    ret=heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (ret==NULL) return ESP_ERR_NO_MEM;

    portENTER_CRITICAL(&spinlock);
    uint32_t cpu = cpu_hal_get_core_id();
    //See if we can find an interrupt that matches the flags.
    int intr=get_available_int(flags, cpu, force, source);
    if (intr==-1) {
        //None found. Bail out.
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NOT_FOUND;
    }
    //Get an int vector desc for int.
    vector_desc_t *vd=get_desc_for_int(intr, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NO_MEM;
    }

    //Allocate that int!
    if (flags&ESP_INTR_FLAG_SHARED) {
        //Populate vector entry and add to linked list.
        shared_vector_desc_t *sh_vec=malloc(sizeof(shared_vector_desc_t));
        if (sh_vec==NULL) {
            portEXIT_CRITICAL(&spinlock);
            free(ret);
            return ESP_ERR_NO_MEM;
        }
        memset(sh_vec, 0, sizeof(shared_vector_desc_t));
        sh_vec->statusreg=(uint32_t*)intrstatusreg;
        sh_vec->statusmask=intrstatusmask;
        sh_vec->isr=handler;
        sh_vec->arg=arg;
        sh_vec->next=vd->shared_vec_info;
        sh_vec->source=source;
        sh_vec->disabled=0;
        vd->shared_vec_info=sh_vec;
        vd->flags|=VECDESC_FL_SHARED;
        //(Re-)set shared isr handler to new value.
        interrupt_controller_hal_set_int_handler(intr, shared_intr_isr, vd);
    } else {
        //Mark as unusable for other interrupt sources. This is ours now!
        vd->flags=VECDESC_FL_NONSHARED;
        if (handler) {
#if CONFIG_APPTRACE_SV_ENABLE
            non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
            if (!ns_isr_arg) {
                portEXIT_CRITICAL(&spinlock);
                free(ret);
                return ESP_ERR_NO_MEM;
            }
            ns_isr_arg->isr=handler;
            ns_isr_arg->isr_arg=arg;
            ns_isr_arg->source=source;
            interrupt_controller_hal_set_int_handler(intr, non_shared_intr_isr, ns_isr_arg);
#else
            interrupt_controller_hal_set_int_handler(intr, handler, arg);
#endif
        }

        if (flags & ESP_INTR_FLAG_EDGE) {
            interrupt_controller_hal_edge_int_acknowledge(intr);
        }

        vd->source=source;
    }
    if (flags&ESP_INTR_FLAG_IRAM) {
        vd->flags|=VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu]&=~(1<<intr);
    } else {
        vd->flags&=~VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu]|=(1<<intr);
    }
    if (source>=0) {
        intr_matrix_set(cpu, source, intr);
    }

    //Fill return handle data.
    ret->vector_desc=vd;
    ret->shared_vector_desc=vd->shared_vec_info;

    //Enable int at CPU-level.
    ESP_INTR_ENABLE(intr);

    //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
    //of the critical section.
    if (flags&ESP_INTR_FLAG_INTRDISABLED) {
        esp_intr_disable(ret);
    }

#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
    //Extract the level from the interrupt passed flags
    int level = esp_intr_flags_to_level(flags);
    interrupt_controller_hal_set_int_level(intr, level);

    if (flags & ESP_INTR_FLAG_EDGE) {
        interrupt_controller_hal_set_int_type(intr, INTTP_EDGE);
    } else {
        interrupt_controller_hal_set_int_type(intr, INTTP_LEVEL);
    }
#endif

    portEXIT_CRITICAL(&spinlock);

    //Fill return handle if needed, otherwise free handle.
    if (ret_handle!=NULL) {
        *ret_handle=ret;
    } else {
        free(ret);
    }

    ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %d)", source, intr, cpu);
    return ESP_OK;
}

esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle)
{
    /*
      As an optimization, we can create a table with the possible interrupt status registers and masks for every single
      source there is. We can then add code here to look up an applicable value and pass that to the
      esp_intr_alloc_intrstatus function.
    */
    return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
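
/*
 Illustrative usage sketch (not part of this module): a driver typically allocates its interrupt
 roughly as follows. The source, handler name and flags below are examples only.

     static void IRAM_ATTR my_isr(void *arg)
     {
         // clear the peripheral's interrupt status here, then notify a task if needed
     }

     intr_handle_t handle;
     esp_err_t err = esp_intr_alloc(ETS_UART0_INTR_SOURCE, ESP_INTR_FLAG_IRAM, my_isr, NULL, &handle);
     if (err == ESP_OK) {
         // ... later, when the driver is torn down:
         esp_intr_free(handle);
     }
*/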

esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    vector_desc_t *vd = handle->vector_desc;
    if (vd->flags & VECDESC_FL_SHARED) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL(&spinlock);
    uint32_t mask = (1 << vd->intno);
    if (is_in_iram) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] &= ~mask;
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] |= mask;
    }
    portEXIT_CRITICAL(&spinlock);
    return ESP_OK;
}

#if !CONFIG_FREERTOS_UNICORE
static void esp_intr_free_cb(void *arg)
{
    (void)esp_intr_free((intr_handle_t)arg);
}
#endif /* !CONFIG_FREERTOS_UNICORE */

esp_err_t esp_intr_free(intr_handle_t handle)
{
    bool free_shared_vector=false;
    if (!handle) return ESP_ERR_INVALID_ARG;

#if !CONFIG_FREERTOS_UNICORE
    //Execute this routine on the core where this interrupt is allocated.
    if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
        esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
        return ret == ESP_OK ? ESP_OK : ESP_FAIL;
    }
#endif /* !CONFIG_FREERTOS_UNICORE */

    portENTER_CRITICAL(&spinlock);
    esp_intr_disable(handle);
    if (handle->vector_desc->flags&VECDESC_FL_SHARED) {
        //Find and kill the shared int
        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
        shared_vector_desc_t *prevsvd=NULL;
        assert(svd); //should be something in there for a shared int
        while (svd!=NULL) {
            if (svd==handle->shared_vector_desc) {
                //Found it. Now kill it.
                if (prevsvd) {
                    prevsvd->next=svd->next;
                } else {
                    handle->vector_desc->shared_vec_info=svd->next;
                }
                free(svd);
                break;
            }
            prevsvd=svd;
            svd=svd->next;
        }
        //If nothing left, disable interrupt.
        if (handle->vector_desc->shared_vec_info==NULL) free_shared_vector=true;
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Deleting shared int: %s. Shared int is %s", svd?"not found or last one":"deleted", free_shared_vector?"empty now.":"still in use");
    }

    if ((handle->vector_desc->flags&VECDESC_FL_NONSHARED) || free_shared_vector) {
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_APPTRACE_SV_ENABLE
        if (!free_shared_vector) {
            void *isr_arg = interrupt_controller_hal_get_int_handler_arg(handle->vector_desc->intno);
            if (isr_arg) {
                free(isr_arg);
            }
        }
#endif
        //Reset to normal handler:
        interrupt_controller_hal_set_int_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
        //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
        //we save. (We can also not use the same exit path for empty shared ints anymore if we delete
        //the desc.) For now, just mark it as free.
        handle->vector_desc->flags&=~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED);
        //Also kill non_iram mask bit.
        non_iram_int_mask[handle->vector_desc->cpu]&=~(1<<(handle->vector_desc->intno));
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle);
    return ESP_OK;
}

int esp_intr_get_intno(intr_handle_t handle)
{
    return handle->vector_desc->intno;
}

int esp_intr_get_cpu(intr_handle_t handle)
{
    return handle->vector_desc->cpu;
}

/*
Interrupt disabling strategy:
If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected
interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE.
This allows us to disable a muxed interrupt from the other core as well. It also allows disabling shared
interrupts.
*/
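
/*
 For example (illustrative, not exhaustive): a GPIO interrupt allocated on core 1 has source >= 0, so core 0
 can disable it by re-routing that source to INT_MUX_DISABLED_INTNO in the interrupt matrix; an internal
 software interrupt (source < 0) can only be disabled from its own core via ESP_INTR_DISABLE.
*/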

//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled.
#define INT_MUX_DISABLED_INTNO 6

esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled=0;
        source=handle->shared_vector_desc->source;
    } else {
        source=handle->vector_desc->source;
    }
    if (source >= 0) {
        //Disabled using int matrix; re-connect to enable
        intr_matrix_set(handle->vector_desc->cpu, source, handle->vector_desc->intno);
    } else {
        //Re-enable using cpu int ena reg
        if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
            //Can only enable these ints on this cpu; release the spinlock before bailing out.
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG;
        }
        ESP_INTR_ENABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    bool disabled = 1;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled=1;
        source=handle->shared_vector_desc->source;

        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
        assert( svd != NULL );
        while( svd ) {
            if ( svd->source == source && svd->disabled == 0 ) {
                disabled = 0;
                break;
            }
            svd = svd->next;
        }
    } else {
        source=handle->vector_desc->source;
    }

    if (source >= 0) {
        if ( disabled ) {
            //Disable using int matrix
            intr_matrix_set(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
    } else {
        //Disable using per-cpu regs
        if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu
        }
        ESP_INTR_DISABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

void IRAM_ATTR esp_intr_noniram_disable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t oldint;
    uint32_t cpu = cpu_hal_get_core_id();
    uint32_t non_iram_ints = non_iram_int_mask[cpu];
    if (non_iram_int_disabled_flag[cpu]) {
        abort();
    }
    non_iram_int_disabled_flag[cpu] = true;
    oldint = interrupt_controller_hal_read_interrupt_mask();
    interrupt_controller_hal_disable_interrupts(non_iram_ints);
    // Save disabled ints
    non_iram_int_disabled[cpu] = oldint & non_iram_ints;
    portEXIT_CRITICAL_SAFE(&spinlock);
}

void IRAM_ATTR esp_intr_noniram_enable(void)
{
    portENTER_CRITICAL_SAFE(&spinlock);
    uint32_t cpu = cpu_hal_get_core_id();
    int non_iram_ints = non_iram_int_disabled[cpu];
    if (!non_iram_int_disabled_flag[cpu]) {
        abort();
    }
    non_iram_int_disabled_flag[cpu] = false;
    interrupt_controller_hal_enable_interrupts(non_iram_ints);
    portEXIT_CRITICAL_SAFE(&spinlock);
}
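
/*
 Note: esp_intr_noniram_disable()/esp_intr_noniram_enable() are meant to be called in matched pairs
 (the abort() calls above enforce this per CPU). A typical use, sketched here for illustration only,
 is to bracket a region where the flash cache is disabled so that only IRAM-safe interrupts keep running:

     esp_intr_noniram_disable();
     // ... code that runs while the cache is unavailable, e.g. a flash erase/write ...
     esp_intr_noniram_enable();
*/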

//These functions are provided in ROM, but the ROM-based functions use non-multicore-capable
//virtualized interrupt levels. Thus, we disable them in the ld file and provide working
//equivalents here.


void IRAM_ATTR ets_isr_unmask(uint32_t mask) {
    interrupt_controller_hal_enable_interrupts(mask);
}

void IRAM_ATTR ets_isr_mask(uint32_t mask) {
    interrupt_controller_hal_disable_interrupts(mask);
}

void esp_intr_enable_source(int inum)
{
    interrupt_controller_hal_enable_interrupts(1 << inum);
}

void esp_intr_disable_source(int inum)
{
    interrupt_controller_hal_disable_interrupts(1 << inum);
}