// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <esp_types.h>
#include <limits.h>
#include <assert.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "esp_attr.h"
#include "hal/cpu_hal.h"
#include "hal/interrupt_controller_hal.h"

#if !CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
#endif

static const char* TAG = "intr_alloc";

#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11

/*
Define this to debug the choices made when allocating the interrupt. This leads to much debugging
output within a critical region, which can lead to weird effects (e.g. the interrupt watchdog being
triggered); that is why it is separate from the normal LOG* scheme.
*/
// #define DEBUG_INT_ALLOC_DECISIONS

#ifdef DEBUG_INT_ALLOC_DECISIONS
# define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__)
#else
# define ALCHLOG(...) do {} while (0)
#endif

typedef struct shared_vector_desc_t shared_vector_desc_t;
typedef struct vector_desc_t vector_desc_t;

struct shared_vector_desc_t {
    int disabled: 1;
    int source: 8;
    volatile uint32_t *statusreg;
    uint32_t statusmask;
    intr_handler_t isr;
    void *arg;
    shared_vector_desc_t *next;
};

#define VECDESC_FL_RESERVED     (1<<0)
#define VECDESC_FL_INIRAM       (1<<1)
#define VECDESC_FL_SHARED       (1<<2)
#define VECDESC_FL_NONSHARED    (1<<3)

//Pack using bitfields for better memory use
struct vector_desc_t {
    int flags: 16;                          //OR of VECDESC_FL_* defines
    unsigned int cpu: 1;
    unsigned int intno: 5;
    int source: 8;                          //Interrupt mux source, used when not shared
    shared_vector_desc_t *shared_vec_info;  //used when VECDESC_FL_SHARED
    vector_desc_t *next;
};

struct intr_handle_data_t {
    vector_desc_t *vector_desc;
    shared_vector_desc_t *shared_vector_desc;
};

typedef struct non_shared_isr_arg_t non_shared_isr_arg_t;

struct non_shared_isr_arg_t {
    intr_handler_t isr;
    void *isr_arg;
    int source;
};

//Linked list of vector descriptions, sorted by cpu.intno value
static vector_desc_t *vector_desc_head = NULL;

//This bitmask has a 1 if the int should be disabled when the flash is disabled.
static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];

//This bitmask has a 1 in it if the int was disabled using esp_intr_noniram_disable.
static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];

#if CONFIG_SYSVIEW_ENABLE
extern uint32_t port_switch_flag[];
#endif

static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
//Inserts an item into vector_desc list so that the list is sorted
//with an incrementing cpu.intno value.
static void insert_vector_desc(vector_desc_t *to_insert)
{
    vector_desc_t *vd=vector_desc_head;
    vector_desc_t *prev=NULL;
    while(vd!=NULL) {
        if (vd->cpu > to_insert->cpu) break;
        if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
        prev=vd;
        vd=vd->next;
    }
    if ((vector_desc_head==NULL) || (prev==NULL)) {
        //First item
        to_insert->next = vd;
        vector_desc_head=to_insert;
    } else {
        prev->next=to_insert;
        to_insert->next=vd;
    }
}
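
/*
Illustrative note (not from the original source): with the insertion rule above, the list stays
sorted first by cpu and then by intno. E.g. inserting descriptors for (cpu 0, int 5), (cpu 1, int 2)
and (cpu 0, int 3), in that order, leaves the list as:
    (cpu 0, int 3) -> (cpu 0, int 5) -> (cpu 1, int 2)
*/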

//Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
static vector_desc_t *find_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd=vector_desc_head;
    while(vd!=NULL) {
        if (vd->cpu==cpu && vd->intno==intno) break;
        vd=vd->next;
    }
    return vd;
}

//Returns a vector_desc entry for an intno/cpu.
//Either returns a preexisting one or allocates a new one and inserts
//it into the list. Returns NULL on malloc fail.
static vector_desc_t *get_desc_for_int(int intno, int cpu)
{
    vector_desc_t *vd=find_desc_for_int(intno, cpu);
    if (vd==NULL) {
        vector_desc_t *newvd=heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
        if (newvd==NULL) return NULL;
        memset(newvd, 0, sizeof(vector_desc_t));
        newvd->intno=intno;
        newvd->cpu=cpu;
        insert_vector_desc(newvd);
        return newvd;
    } else {
        return vd;
    }
}

//Returns a vector_desc entry for a source. The cpu parameter is used to tell apart the GPIO_INT
//and GPIO_NMI sources of different CPUs.
static vector_desc_t * find_desc_for_source(int source, int cpu)
{
    vector_desc_t *vd=vector_desc_head;
    while(vd!=NULL) {
        if ( !(vd->flags & VECDESC_FL_SHARED) ) {
            if ( vd->source == source && cpu == vd->cpu ) break;
        } else if ( vd->cpu == cpu ) {
            // check only shared vds for the correct cpu, otherwise skip
            bool found = false;
            shared_vector_desc_t *svd = vd->shared_vec_info;
            assert(svd != NULL );
            while(svd) {
                if ( svd->source == source ) {
                    found = true;
                    break;
                }
                svd = svd->next;
            }
            if ( found ) break;
        }
        vd=vd->next;
    }
    return vd;
}

esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
    if (intno>31) return ESP_ERR_INVALID_ARG;
    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd=get_desc_for_int(intno, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags=VECDESC_FL_SHARED;
    if (is_int_ram) vd->flags|=VECDESC_FL_INIRAM;
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}

esp_err_t esp_intr_reserve(int intno, int cpu)
{
    if (intno>31) return ESP_ERR_INVALID_ARG;
    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;

    portENTER_CRITICAL(&spinlock);
    vector_desc_t *vd=get_desc_for_int(intno, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    vd->flags=VECDESC_FL_RESERVED;
    portEXIT_CRITICAL(&spinlock);

    return ESP_OK;
}
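
/*
Usage sketch (illustrative only, not part of this file): code that hard-wires a CPU interrupt line,
e.g. a high-level interrupt handler written in assembly, would typically reserve or pre-mark the line
during startup. The interrupt numbers and core below are made-up example values.

    // Reserve CPU interrupt 28 on core 0 so esp_intr_alloc() will never hand it out.
    ESP_ERROR_CHECK(esp_intr_reserve(28, 0));

    // Alternatively, pre-mark CPU interrupt 17 on core 0 as shared, with its ISRs kept in IRAM.
    ESP_ERROR_CHECK(esp_intr_mark_shared(17, 0, true));
*/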

static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force)
{
    //Check if interrupt is not reserved by design
    int x = vd->intno;
    if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_RESVD) {
        ALCHLOG("....Unusable: reserved");
        return false;
    }
    if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_SPECIAL && force==-1) {
        ALCHLOG("....Unusable: special-purpose int");
        return false;
    }

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
    //Check if the interrupt level is acceptable
    if (!(flags&(1<<interrupt_controller_hal_get_level(x)))) {
        ALCHLOG("....Unusable: incompatible level");
        return false;
    }
    //check if edge/level type matches what we want
    if (((flags&ESP_INTR_FLAG_EDGE) && (interrupt_controller_hal_get_type(x)==INTTP_LEVEL)) ||
        (((!(flags&ESP_INTR_FLAG_EDGE)) && (interrupt_controller_hal_get_type(x)==INTTP_EDGE)))) {
        ALCHLOG("....Unusable: incompatible trigger type");
        return false;
    }
#endif

    //check if interrupt is reserved at runtime
    if (vd->flags&VECDESC_FL_RESERVED) {
        ALCHLOG("....Unusable: reserved at runtime.");
        return false;
    }

    //Ints can't be both shared and non-shared.
    assert(!((vd->flags&VECDESC_FL_SHARED)&&(vd->flags&VECDESC_FL_NONSHARED)));
    //check if interrupt already is in use by a non-shared interrupt
    if (vd->flags&VECDESC_FL_NONSHARED) {
        ALCHLOG("....Unusable: already in (non-shared) use.");
        return false;
    }
    // check shared interrupt flags
    if (vd->flags&VECDESC_FL_SHARED ) {
        if (flags&ESP_INTR_FLAG_SHARED) {
            bool in_iram_flag=((flags&ESP_INTR_FLAG_IRAM)!=0);
            bool desc_in_iram_flag=((vd->flags&VECDESC_FL_INIRAM)!=0);
            //Bail out if int is shared, but iram property doesn't match what we want.
            if ((vd->flags&VECDESC_FL_SHARED) && (desc_in_iram_flag!=in_iram_flag)) {
                ALCHLOG("....Unusable: shared but iram prop doesn't match");
                return false;
            }
        } else {
            //We need an unshared IRQ; can't use shared ones; bail out if this is shared.
            ALCHLOG("...Unusable: int is shared, we need non-shared.");
            return false;
        }
    } else if (interrupt_controller_hal_has_handler(x, cpu)) {
        //Check if interrupt already is allocated by interrupt_controller_hal_set_int_handler
        ALCHLOG("....Unusable: already allocated");
        return false;
    }

    return true;
}

//Locate a free interrupt compatible with the flags given.
//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
//When an interrupt is forced, the INTDESC_SPECIAL marked interrupts are also accepted.
static int get_available_int(int flags, int cpu, int force, int source)
{
    int x;
    int best=-1;
    int bestLevel=9;
    int bestSharedCt=INT_MAX;

    //Default vector desc, for vectors not in the linked list
    vector_desc_t empty_vect_desc;
    memset(&empty_vect_desc, 0, sizeof(vector_desc_t));

    //Level defaults to any low/med interrupt
    if (!(flags&ESP_INTR_FLAG_LEVELMASK)) flags|=ESP_INTR_FLAG_LOWMED;

    ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
    vector_desc_t *vd = find_desc_for_source(source, cpu);
    if ( vd ) {
        // if an existing vd is found, we don't need to search any more.
        ALCHLOG("get_available_int: existing vd found. intno: %d", vd->intno);
        if ( force != -1 && force != vd->intno ) {
            ALCHLOG("get_available_int: intr forced but does not match existing. existing intno: %d, force: %d", vd->intno, force);
        } else if ( !is_vect_desc_usable(vd, flags, cpu, force) ) {
            ALCHLOG("get_available_int: existing vd invalid.");
        } else {
            best = vd->intno;
        }
        return best;
    }
    if (force!=-1) {
        ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
        //if force assigned, don't need to search any more.
        vd = find_desc_for_int(force, cpu);
        if (vd == NULL ) {
            //if existing vd not found, just check the default state for the intr.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
        if ( is_vect_desc_usable(vd, flags, cpu, force) ) {
            best = vd->intno;
        } else {
            ALCHLOG("get_available_int: forced vd invalid.");
        }
        return best;
    }

    ALCHLOG("get_available_int: start looking. Current cpu: %d", cpu);
    //No existing vd for this source and no forced intno; iterate over the 32 possible interrupts
    for (x=0; x<32; x++) {
        //Grab the vector_desc for this vector.
        vd=find_desc_for_int(x, cpu);
        if (vd==NULL) {
            empty_vect_desc.intno = x;
            vd=&empty_vect_desc;
        }

        ALCHLOG("Int %d reserved %d level %d %s hasIsr %d",
            x, interrupt_controller_hal_get_cpu_desc_flags(x,cpu)==INTDESC_RESVD, interrupt_controller_hal_get_level(x),
            interrupt_controller_hal_get_type(x)==INTTP_LEVEL?"LEVEL":"EDGE", interrupt_controller_hal_has_handler(x, cpu));

        if ( !is_vect_desc_usable(vd, flags, cpu, force) ) continue;

        if (flags&ESP_INTR_FLAG_SHARED) {
            //We're allocating a shared int.

            //See if int already is used as a shared interrupt.
            if (vd->flags&VECDESC_FL_SHARED) {
                //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
                //how useful it is.
                int no=0;
                shared_vector_desc_t *svdesc=vd->shared_vec_info;
                while (svdesc!=NULL) {
                    no++;
                    svdesc=svdesc->next;
                }
                if (no<bestSharedCt || bestLevel>interrupt_controller_hal_get_level(x)) {
                    //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
                    best=x;
                    bestSharedCt=no;
                    bestLevel=interrupt_controller_hal_get_level(x);
                    ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                } else {
                    ALCHLOG("...worse than int %d", best);
                }
            } else {
                if (best==-1) {
                    //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                    //not marked as shared.
                    //Remember it in case we don't find any other shared interrupt that qualifies.
                    if (bestLevel>interrupt_controller_hal_get_level(x)) {
                        best=x;
                        bestLevel=interrupt_controller_hal_get_level(x);
                        ALCHLOG("...int %d usable as a new shared int", x);
                    }
                } else {
                    ALCHLOG("...already have a shared int");
                }
            }
        } else {
            //Seems this interrupt is feasible. Select it if it has a lower level than the current best;
            //keep iterating in case an even lower-level interrupt turns up.
            if (bestLevel>interrupt_controller_hal_get_level(x)) {
                best=x;
                bestLevel=interrupt_controller_hal_get_level(x);
            } else {
                ALCHLOG("...worse than int %d", best);
            }
        }
    }
    ALCHLOG("get_available_int: using int %d", best);

    //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best.
    return best;
}

//Common shared isr handler. Chain-call all ISRs.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
    vector_desc_t *vd=(vector_desc_t*)arg;
    shared_vector_desc_t *sh_vec=vd->shared_vec_info;
    portENTER_CRITICAL_ISR(&spinlock);
    while(sh_vec) {
        if (!sh_vec->disabled) {
            if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
#if CONFIG_SYSVIEW_ENABLE
                traceISR_ENTER(sh_vec->source+ETS_INTERNAL_INTR_SOURCE_OFF);
#endif
                sh_vec->isr(sh_vec->arg);
#if CONFIG_SYSVIEW_ENABLE
                // check if we will return to scheduler or to interrupted task after ISR
                if (!port_switch_flag[cpu_hal_get_core_id()]) {
                    traceISR_EXIT();
                }
#endif
            }
        }
        sh_vec=sh_vec->next;
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}

#if CONFIG_SYSVIEW_ENABLE
//Common non-shared isr handler wrapper.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
    non_shared_isr_arg_t *ns_isr_arg=(non_shared_isr_arg_t*)arg;
    portENTER_CRITICAL_ISR(&spinlock);
    traceISR_ENTER(ns_isr_arg->source+ETS_INTERNAL_INTR_SOURCE_OFF);
    // FIXME: can we call ISR and check port_switch_flag after releasing spinlock?
    // when CONFIG_SYSVIEW_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
    ns_isr_arg->isr(ns_isr_arg->isr_arg);
    // check if we will return to scheduler or to interrupted task after ISR
    if (!port_switch_flag[cpu_hal_get_core_id()]) {
        traceISR_EXIT();
    }
    portEXIT_CRITICAL_ISR(&spinlock);
}
#endif

//We use ESP_EARLY_LOG* here because this can be called before the scheduler is running.
esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler,
                                    void *arg, intr_handle_t *ret_handle)
{
    intr_handle_data_t *ret=NULL;
    int force=-1;
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %d): checking args", cpu_hal_get_core_id());
    //Shared interrupts should be level-triggered.
    if ((flags&ESP_INTR_FLAG_SHARED) && (flags&ESP_INTR_FLAG_EDGE)) return ESP_ERR_INVALID_ARG;
    //You can't set a handler / arg for a non-C-callable interrupt.
    if ((flags&ESP_INTR_FLAG_HIGH) && (handler)) return ESP_ERR_INVALID_ARG;
    //Shared ints should have handler and non-processor-local source
    if ((flags&ESP_INTR_FLAG_SHARED) && (!handler || source<0)) return ESP_ERR_INVALID_ARG;
    //Statusreg should have a mask
    if (intrstatusreg && !intrstatusmask) return ESP_ERR_INVALID_ARG;
    //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
    //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000-0x400c2000 region,
    //we need to make sure the interrupt is connected to CPU0.
    //CPU1 does not have access to the RTC fast memory through this region.
    if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) && !esp_ptr_in_rtc_iram_fast(handler)) {
        return ESP_ERR_INVALID_ARG;
    }

    //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
    if ((flags&ESP_INTR_FLAG_LEVELMASK)==0) {
        if (flags&ESP_INTR_FLAG_SHARED) {
            flags|=ESP_INTR_FLAG_LEVEL1;
        } else {
            flags|=ESP_INTR_FLAG_LOWMED;
        }
    }
    ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %d): Args okay. Resulting flags 0x%X", cpu_hal_get_core_id(), flags);

    //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
    //have to force get_available_int to only look at that.
    if (source==ETS_INTERNAL_TIMER0_INTR_SOURCE) force=ETS_INTERNAL_TIMER0_INTR_NO;
    if (source==ETS_INTERNAL_TIMER1_INTR_SOURCE) force=ETS_INTERNAL_TIMER1_INTR_NO;
    if (source==ETS_INTERNAL_TIMER2_INTR_SOURCE) force=ETS_INTERNAL_TIMER2_INTR_NO;
    if (source==ETS_INTERNAL_SW0_INTR_SOURCE) force=ETS_INTERNAL_SW0_INTR_NO;
    if (source==ETS_INTERNAL_SW1_INTR_SOURCE) force=ETS_INTERNAL_SW1_INTR_NO;
    if (source==ETS_INTERNAL_PROFILING_INTR_SOURCE) force=ETS_INTERNAL_PROFILING_INTR_NO;

    //Allocate a return handle. If we end up not needing it, we'll free it later on.
    ret=heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (ret==NULL) return ESP_ERR_NO_MEM;

    portENTER_CRITICAL(&spinlock);
    int cpu=cpu_hal_get_core_id();
    //See if we can find an interrupt that matches the flags.
    int intr=get_available_int(flags, cpu, force, source);
    if (intr==-1) {
        //None found. Bail out.
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NOT_FOUND;
    }
    //Get an int vector desc for int.
    vector_desc_t *vd=get_desc_for_int(intr, cpu);
    if (vd==NULL) {
        portEXIT_CRITICAL(&spinlock);
        free(ret);
        return ESP_ERR_NO_MEM;
    }

    //Allocate that int!
    if (flags&ESP_INTR_FLAG_SHARED) {
        //Populate vector entry and add to linked list.
        shared_vector_desc_t *sh_vec=malloc(sizeof(shared_vector_desc_t));
        if (sh_vec==NULL) {
            portEXIT_CRITICAL(&spinlock);
            free(ret);
            return ESP_ERR_NO_MEM;
        }
        memset(sh_vec, 0, sizeof(shared_vector_desc_t));
        sh_vec->statusreg=(uint32_t*)intrstatusreg;
        sh_vec->statusmask=intrstatusmask;
        sh_vec->isr=handler;
        sh_vec->arg=arg;
        sh_vec->next=vd->shared_vec_info;
        sh_vec->source=source;
        sh_vec->disabled=0;
        vd->shared_vec_info=sh_vec;
        vd->flags|=VECDESC_FL_SHARED;
        //(Re-)set shared isr handler to new value.
        interrupt_controller_hal_set_int_handler(intr, shared_intr_isr, vd);
    } else {
        //Mark as unusable for other interrupt sources. This is ours now!
        vd->flags=VECDESC_FL_NONSHARED;
        if (handler) {
#if CONFIG_SYSVIEW_ENABLE
            non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
            if (!ns_isr_arg) {
                portEXIT_CRITICAL(&spinlock);
                free(ret);
                return ESP_ERR_NO_MEM;
            }
            ns_isr_arg->isr=handler;
            ns_isr_arg->isr_arg=arg;
            ns_isr_arg->source=source;
            interrupt_controller_hal_set_int_handler(intr, non_shared_intr_isr, ns_isr_arg);
#else
            interrupt_controller_hal_set_int_handler(intr, handler, arg);
#endif
        }

        if (flags & ESP_INTR_FLAG_EDGE) {
            interrupt_controller_hal_edge_int_acknowledge(intr);
        }

        vd->source=source;
    }
    if (flags&ESP_INTR_FLAG_IRAM) {
        vd->flags|=VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu]&=~(1<<intr);
    } else {
        vd->flags&=~VECDESC_FL_INIRAM;
        non_iram_int_mask[cpu]|=(1<<intr);
    }
    if (source>=0) {
        intr_matrix_set(cpu, source, intr);
    }

    //Fill return handle data.
    ret->vector_desc=vd;
    ret->shared_vector_desc=vd->shared_vec_info;

    //Enable int at CPU-level;
    ESP_INTR_ENABLE(intr);

    //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
    //of the critical section.
    if (flags&ESP_INTR_FLAG_INTRDISABLED) {
        esp_intr_disable(ret);
    }

#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
    //Extract the level from the interrupt passed flags
    int level = esp_intr_flags_to_level(flags);
    interrupt_controller_hal_set_int_level(intr, level);

    if (flags & ESP_INTR_FLAG_EDGE) {
        interrupt_controller_hal_set_int_type(intr, INTTP_EDGE);
    } else {
        interrupt_controller_hal_set_int_type(intr, INTTP_LEVEL);
    }
#endif

    portEXIT_CRITICAL(&spinlock);

    //Fill return handle if needed, otherwise free handle.
    if (ret_handle!=NULL) {
        *ret_handle=ret;
    } else {
        free(ret);
    }

    ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %d)", source, intr, cpu);
    return ESP_OK;
}
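
/*
Usage sketch (illustrative only): a driver for a peripheral that shares its CPU interrupt with other
sources can pass the peripheral's interrupt status register and a mask, so shared_intr_isr() above
only calls the handler when one of the masked status bits is actually set. The register address,
mask and source below are made-up placeholders; a real driver would take them from the SoC headers
for its peripheral.

    static void IRAM_ATTR my_periph_isr(void *arg)
    {
        // ... read and clear the peripheral's interrupt status here ...
    }

    intr_handle_t handle;
    ESP_ERROR_CHECK(esp_intr_alloc_intrstatus(MY_PERIPH_INTR_SOURCE,
                                              ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_IRAM,
                                              (uint32_t)MY_PERIPH_INT_ST_REG, MY_PERIPH_INT_ST_MASK,
                                              my_periph_isr, NULL, &handle));
*/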

esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle)
{
    /*
      As an optimization, we can create a table with the possible interrupt status registers and masks for every single
      source there is. We can then add code here to look up an applicable value and pass that to the
      esp_intr_alloc_intrstatus function.
    */
    return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
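
/*
Usage sketch (illustrative only): a typical non-shared allocation lets the allocator pick any free
CPU interrupt on the calling core, route the given source to it, and install the handler. The source
and handler names below are made-up placeholders.

    static void IRAM_ATTR my_isr(void *arg)
    {
        // ... handle the interrupt; keep it short and IRAM-safe ...
    }

    intr_handle_t handle;
    ESP_ERROR_CHECK(esp_intr_alloc(MY_INTR_SOURCE, ESP_INTR_FLAG_IRAM, my_isr, NULL, &handle));
    // ... later, detach the handler and release the interrupt again:
    ESP_ERROR_CHECK(esp_intr_free(handle));
*/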

esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    vector_desc_t *vd = handle->vector_desc;
    if (vd->flags & VECDESC_FL_SHARED) {
        return ESP_ERR_INVALID_ARG;
    }
    portENTER_CRITICAL(&spinlock);
    uint32_t mask = (1 << vd->intno);
    if (is_in_iram) {
        vd->flags |= VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] &= ~mask;
    } else {
        vd->flags &= ~VECDESC_FL_INIRAM;
        non_iram_int_mask[vd->cpu] |= mask;
    }
    portEXIT_CRITICAL(&spinlock);
    return ESP_OK;
}

#if !CONFIG_FREERTOS_UNICORE
static void esp_intr_free_cb(void *arg)
{
    (void)esp_intr_free((intr_handle_t)arg);
}
#endif /* !CONFIG_FREERTOS_UNICORE */

esp_err_t esp_intr_free(intr_handle_t handle)
{
    bool free_shared_vector=false;
    if (!handle) return ESP_ERR_INVALID_ARG;

#if !CONFIG_FREERTOS_UNICORE
    //Execute this routine on the core where this interrupt is allocated.
    if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
        esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
        return ret == ESP_OK ? ESP_OK : ESP_FAIL;
    }
#endif /* !CONFIG_FREERTOS_UNICORE */

    portENTER_CRITICAL(&spinlock);
    esp_intr_disable(handle);
    if (handle->vector_desc->flags&VECDESC_FL_SHARED) {
        //Find and kill the shared int
        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
        shared_vector_desc_t *prevsvd=NULL;
        assert(svd); //should be something in there for a shared int
        while (svd!=NULL) {
            if (svd==handle->shared_vector_desc) {
                //Found it. Now kill it.
                if (prevsvd) {
                    prevsvd->next=svd->next;
                } else {
                    handle->vector_desc->shared_vec_info=svd->next;
                }
                free(svd);
                break;
            }
            prevsvd=svd;
            svd=svd->next;
        }
        //If nothing left, disable interrupt.
        if (handle->vector_desc->shared_vec_info==NULL) free_shared_vector=true;
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Deleting shared int: %s. Shared int is %s", svd?"not found or last one":"deleted", free_shared_vector?"empty now.":"still in use");
    }

    if ((handle->vector_desc->flags&VECDESC_FL_NONSHARED) || free_shared_vector) {
        ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_SYSVIEW_ENABLE
        if (!free_shared_vector) {
            void *isr_arg = interrupt_controller_hal_get_int_handler_arg(handle->vector_desc->intno);
            if (isr_arg) {
                free(isr_arg);
            }
        }
#endif
        //Reset to normal handler:
        interrupt_controller_hal_set_int_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
        //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
        //we save. (We can also not use the same exit path for empty shared ints anymore if we delete
        //the desc.) For now, just mark it as free.
        handle->vector_desc->flags&=~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED);
        //Also kill non_iram mask bit.
        non_iram_int_mask[handle->vector_desc->cpu]&=~(1<<(handle->vector_desc->intno));
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle);
    return ESP_OK;
}

int esp_intr_get_intno(intr_handle_t handle)
{
    return handle->vector_desc->intno;
}

int esp_intr_get_cpu(intr_handle_t handle)
{
    return handle->vector_desc->cpu;
}

/*
Interrupt disabling strategy:
If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected
interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE.
This allows us, for muxed sources, to disable an int from the other core. It also allows disabling shared
interrupts.
*/

//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled.
#define INT_MUX_DISABLED_INTNO 6

esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled=0;
        source=handle->shared_vector_desc->source;
    } else {
        source=handle->vector_desc->source;
    }
    if (source >= 0) {
        //Disabled using int matrix; re-connect to enable
        intr_matrix_set(handle->vector_desc->cpu, source, handle->vector_desc->intno);
    } else {
        //Re-enable using cpu int ena reg
        if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
        ESP_INTR_ENABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
    if (!handle) return ESP_ERR_INVALID_ARG;
    portENTER_CRITICAL_SAFE(&spinlock);
    int source;
    bool disabled = true;
    if (handle->shared_vector_desc) {
        handle->shared_vector_desc->disabled=1;
        source=handle->shared_vector_desc->source;

        //Only detach the source from the int matrix once every shared handler on it is disabled.
        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
        assert( svd != NULL );
        while( svd ) {
            if ( svd->source == source && svd->disabled == 0 ) {
                disabled = false;
                break;
            }
            svd = svd->next;
        }
    } else {
        source=handle->vector_desc->source;
    }

    if (source >= 0) {
        if ( disabled ) {
            //Disable using int matrix
            intr_matrix_set(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
    } else {
        //Disable using per-cpu regs
        if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu
        }
        ESP_INTR_DISABLE(handle->vector_desc->intno);
    }
    portEXIT_CRITICAL_SAFE(&spinlock);
    return ESP_OK;
}

void IRAM_ATTR esp_intr_noniram_disable(void)
{
    uint32_t oldint;
    int cpu=cpu_hal_get_core_id();
    uint32_t intmask=~non_iram_int_mask[cpu];
    if (non_iram_int_disabled_flag[cpu]) abort();
    non_iram_int_disabled_flag[cpu]=true;
    oldint = interrupt_controller_hal_disable_int_mask(intmask);
    //Save which ints we did disable
    non_iram_int_disabled[cpu]=oldint&non_iram_int_mask[cpu];
}

void IRAM_ATTR esp_intr_noniram_enable(void)
{
    int cpu=cpu_hal_get_core_id();
    int intmask=non_iram_int_disabled[cpu];
    if (!non_iram_int_disabled_flag[cpu]) abort();
    non_iram_int_disabled_flag[cpu]=false;
    interrupt_controller_hal_enable_int_mask(intmask);
}
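
/*
Usage sketch (illustrative only): esp_intr_noniram_disable()/esp_intr_noniram_enable() are meant to
be used as a pair around code that makes the flash cache unavailable (e.g. SPI flash writes), so that
only IRAM-resident interrupts can fire in between. The flash operation below is a made-up placeholder.

    esp_intr_noniram_disable();
    // ... do work that requires the cache to be disabled, e.g. a flash erase/write ...
    esp_intr_noniram_enable();
*/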

//These functions are provided in ROM, but the ROM-based functions use non-multicore-capable
//virtualized interrupt levels. Thus, we disable them in the ld file and provide working
//equivalents here.


void IRAM_ATTR ets_isr_unmask(uint32_t mask) {
    interrupt_controller_hal_enable_interrupts(mask);
}

void IRAM_ATTR ets_isr_mask(uint32_t mask) {
    interrupt_controller_hal_disable_interrupts(mask);
}

void esp_intr_enable_source(int inum)
{
    interrupt_controller_hal_enable_interrupts(1 << inum);
}

void esp_intr_disable_source(int inum)
{
    interrupt_controller_hal_disable_interrupts(1 << inum);
}