/*
 * Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <soc.h>
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#include "esp_attr.h"
#include <hal/cpu_hal.h>
#include <hal/interrupt_controller_hal.h>
#include <limits.h>
#include <assert.h>
#include "soc/soc.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(esp32_intc, CONFIG_LOG_DEFAULT_LEVEL);

#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11

#define VECDESC_FL_RESERVED     (1 << 0)
#define VECDESC_FL_INIRAM       (1 << 1)
#define VECDESC_FL_SHARED       (1 << 2)
#define VECDESC_FL_NONSHARED    (1 << 3)

/*
 * Define this to debug the choices made when allocating an interrupt. This leads to a lot of
 * debugging output within a critical section, which can cause weird side effects such as the
 * interrupt watchdog being triggered; that is why it is kept separate from the normal LOG* scheme.
 */
#ifdef CONFIG_INTC_ESP32_DECISIONS_LOG
# define INTC_LOG(...) LOG_INF(__VA_ARGS__)
#else
# define INTC_LOG(...) do {} while (false)
#endif
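
/*
 * Example: a minimal prj.conf sketch for enabling the decision log
 * (assumes the standard Zephyr logging subsystem is available):
 *
 *   CONFIG_LOG=y
 *   CONFIG_INTC_ESP32_DECISIONS_LOG=y
 */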

/* Typedef for C-callable interrupt handler function */
typedef void (*intc_handler_t)(void *);
typedef void (*intc_dyn_handler_t)(const void *);

/* shared critical section context */
static unsigned int esp_intc_csec;

static inline void esp_intr_lock(void)
{
	esp_intc_csec = irq_lock();
}

static inline void esp_intr_unlock(void)
{
	irq_unlock(esp_intc_csec);
}

/*
 * Interrupt handler table and unhandled interrupt routine. Duplicated
 * from xtensa_intr.c... it's supposed to be private, but we need to look
 * into it in order to see if someone allocated an int using
 * set_interrupt_handler.
 */
struct intr_alloc_table_entry {
	void (*handler)(void *arg);
	void *arg;
};

/* Default handler for unhandled interrupts. */
void default_intr_handler(void *arg)
{
	printk("Unhandled interrupt %d on cpu %d!\n", (int)arg, esp_core_id());
}

static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_MAX_NUM_CPUS];

static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
{
	irq_disable(n);
	intr_alloc_table[n * CONFIG_MP_MAX_NUM_CPUS].handler = f;
	irq_connect_dynamic(n, n, (intc_dyn_handler_t)f, arg, 0);
	irq_enable(n);
}

/* Linked list of vector descriptions, sorted by cpu.intno value */
static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */

/* This bitmask has a 1 in it if the int should be disabled when the flash is disabled. */
static uint32_t non_iram_int_mask[CONFIG_MP_MAX_NUM_CPUS];
/* This bitmask has a 1 in it if the int was disabled using esp_intr_noniram_disable. */
static uint32_t non_iram_int_disabled[CONFIG_MP_MAX_NUM_CPUS];
static bool non_iram_int_disabled_flag[CONFIG_MP_MAX_NUM_CPUS];

/*
 * Inserts an item into the vector_desc list so that the list remains
 * sorted by increasing cpu.intno value.
 */
static void insert_vector_desc(struct vector_desc_t *to_insert)
{
	struct vector_desc_t *vd = vector_desc_head;
	struct vector_desc_t *prev = NULL;

	while (vd != NULL) {
		if (vd->cpu > to_insert->cpu) {
			break;
		}
		if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) {
			break;
		}
		prev = vd;
		vd = vd->next;
	}
	if ((vector_desc_head == NULL) || (prev == NULL)) {
		/* First item */
		to_insert->next = vd;
		vector_desc_head = to_insert;
	} else {
		prev->next = to_insert;
		to_insert->next = vd;
	}
}
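
/*
 * Illustration of the resulting ordering (not code): after inserting
 * descriptors for (cpu 0, intno 5), (cpu 1, intno 2) and (cpu 0, intno 3),
 * the list reads:
 *
 *   (cpu 0, intno 3) -> (cpu 0, intno 5) -> (cpu 1, intno 2)
 */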

/* Returns a vector_desc entry for an intno/cpu, or NULL if none exists. */
static struct vector_desc_t *find_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = vector_desc_head;

	while (vd != NULL) {
		if (vd->cpu == cpu && vd->intno == intno) {
			break;
		}
		vd = vd->next;
	}
	return vd;
}

/*
 * Returns a vector_desc entry for an intno/cpu.
 * Either returns a preexisting one or allocates a new one and inserts
 * it into the list. Returns NULL on malloc fail.
 */
static struct vector_desc_t *get_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = find_desc_for_int(intno, cpu);

	if (vd == NULL) {
		struct vector_desc_t *newvd = k_malloc(sizeof(struct vector_desc_t));

		if (newvd == NULL) {
			return NULL;
		}
		memset(newvd, 0, sizeof(struct vector_desc_t));
		newvd->intno = intno;
		newvd->cpu = cpu;
		insert_vector_desc(newvd);
		return newvd;
	} else {
		return vd;
	}
}

/*
 * Returns a vector_desc entry for a source; the cpu parameter is used
 * to tell apart GPIO_INT and GPIO_NMI, which exist once per CPU.
 */
static struct vector_desc_t *find_desc_for_source(int source, int cpu)
{
	struct vector_desc_t *vd = vector_desc_head;

	while (vd != NULL) {
		if (!(vd->flags & VECDESC_FL_SHARED)) {
			if (vd->source == source && cpu == vd->cpu) {
				break;
			}
		} else if (vd->cpu == cpu) {
			/* check only shared vds for the correct cpu, otherwise skip */
			bool found = false;
			struct shared_vector_desc_t *svd = vd->shared_vec_info;

			assert(svd != NULL);
			while (svd) {
				if (svd->source == source) {
					found = true;
					break;
				}
				svd = svd->next;
			}
			if (found) {
				break;
			}
		}
		vd = vd->next;
	}
	return vd;
}

void esp_intr_initialize(void)
{
	unsigned int num_cpus = arch_num_cpus();

	for (size_t i = 0; i < (ESP_INTC_INTS_NUM * num_cpus); ++i) {
		intr_alloc_table[i].handler = default_intr_handler;
		intr_alloc_table[i].arg = (void *)i;
	}
}

int esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
	if (intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_SHARED;
	if (is_int_ram) {
		vd->flags |= VECDESC_FL_INIRAM;
	}
	esp_intr_unlock();

	return 0;
}
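
/*
 * Usage sketch (intno/cpu values are hypothetical and board-dependent):
 * mark interrupt line 10 on CPU 0 as shared and IRAM-safe, so that later
 * allocations on that line are treated accordingly:
 *
 *   if (esp_intr_mark_shared(10, 0, true) != 0) {
 *           // intno/cpu out of range, or out of memory
 *   }
 */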

int esp_intr_reserve(int intno, int cpu)
{
	if (intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_RESERVED;
	esp_intr_unlock();

	return 0;
}

/* Returns true if handler for interrupt is not the default unhandled interrupt handler */
static bool intr_has_handler(int intr, int cpu)
{
	bool r;

	r = intr_alloc_table[intr * CONFIG_MP_MAX_NUM_CPUS + cpu].handler != default_intr_handler;

	return r;
}

static bool is_vect_desc_usable(struct vector_desc_t *vd, int flags, int cpu, int force)
{
	/* Check if interrupt is not reserved by design */
	int x = vd->intno;

	if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_RESVD) {
		INTC_LOG("....Unusable: reserved");
		return false;
	}
	if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_SPECIAL && force == -1) {
		INTC_LOG("....Unusable: special-purpose int");
		return false;
	}
	/* Check if the interrupt level is acceptable */
	if (!(flags & (1 << interrupt_controller_hal_get_level(x)))) {
		INTC_LOG("....Unusable: incompatible level");
		return false;
	}
	/* check if edge/level type matches what we want */
	if (((flags & ESP_INTR_FLAG_EDGE) &&
		(interrupt_controller_hal_get_type(x) == INTTP_LEVEL)) ||
		(((!(flags & ESP_INTR_FLAG_EDGE)) &&
		(interrupt_controller_hal_get_type(x) == INTTP_EDGE)))) {
		INTC_LOG("....Unusable: incompatible trigger type");
		return false;
	}
	/* check if interrupt is reserved at runtime */
	if (vd->flags & VECDESC_FL_RESERVED) {
		INTC_LOG("....Unusable: reserved at runtime.");
		return false;
	}

	/* Ints can't be both shared and non-shared. */
	assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
	/* check if interrupt already is in use by a non-shared interrupt */
	if (vd->flags & VECDESC_FL_NONSHARED) {
		INTC_LOG("....Unusable: already in (non-shared) use.");
		return false;
	}
	/* check shared interrupt flags */
	if (vd->flags & VECDESC_FL_SHARED) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
			bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
			/*
			 * Bail out if int is shared, but iram property
			 * doesn't match what we want.
			 */
			if ((vd->flags & VECDESC_FL_SHARED) &&
				(desc_in_iram_flag != in_iram_flag)) {
				INTC_LOG("....Unusable: shared but iram prop doesn't match");
				return false;
			}
		} else {
			/*
			 * We need an unshared IRQ; can't use shared ones;
			 * bail out if this is shared.
			 */
			INTC_LOG("...Unusable: int is shared, we need non-shared.");
			return false;
		}
	} else if (intr_has_handler(x, cpu)) {
		/* Check if interrupt already is allocated by set_interrupt_handler */
		INTC_LOG("....Unusable: already allocated");
		return false;
	}

	return true;
}

/*
 * Locate a free interrupt compatible with the flags given.
 * The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
 * When an interrupt number is forced, interrupts marked INTDESC_SPECIAL are also accepted.
 */
static int get_available_int(int flags, int cpu, int force, int source)
{
	int x;
	int best = -1;
	int best_level = 9;
	int best_shared_ct = INT_MAX;
	/* Default vector desc, for vectors not in the linked list */
	struct vector_desc_t empty_vect_desc;

	memset(&empty_vect_desc, 0, sizeof(struct vector_desc_t));

	/* Level defaults to any low/med interrupt */
	if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
		flags |= ESP_INTR_FLAG_LOWMED;
	}

	INTC_LOG("%s: try to find existing. Cpu: %d, Source: %d", __func__, cpu, source);
	struct vector_desc_t *vd = find_desc_for_source(source, cpu);

	if (vd) {
		/* if existing vd found, don't need to search any more. */
		INTC_LOG("%s: existing vd found. intno: %d", __func__, vd->intno);
		if (force != -1 && force != vd->intno) {
			INTC_LOG("%s: intr forced but not match existing. "
				 "existing intno: %d, force: %d", __func__, vd->intno, force);
		} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			INTC_LOG("%s: existing vd invalid.", __func__);
		} else {
			best = vd->intno;
		}
		return best;
	}
	if (force != -1) {
		INTC_LOG("%s: try to find force. "
			 "Cpu: %d, Source: %d, Force: %d", __func__, cpu, source, force);
		/* if force assigned, don't need to search any more. */
		vd = find_desc_for_int(force, cpu);
		if (vd == NULL) {
			/* if existing vd not found, just check the default state for the intr. */
			empty_vect_desc.intno = force;
			vd = &empty_vect_desc;
		}
		if (is_vect_desc_usable(vd, flags, cpu, force)) {
			best = vd->intno;
		} else {
			INTC_LOG("%s: forced vd invalid.", __func__);
		}
		return best;
	}

	INTC_LOG("%s: start looking. Current cpu: %d", __func__, cpu);
	/* No existing vd and no forced intr; iterate over the 32 possible interrupts */
	for (x = 0; x < ESP_INTC_INTS_NUM; x++) {
		/* Grab the vector_desc for this vector. */
		vd = find_desc_for_int(x, cpu);
		if (vd == NULL) {
			empty_vect_desc.intno = x;
			vd = &empty_vect_desc;
		}

		INTC_LOG("Int %d reserved %d level %d %s hasIsr %d",
			 x,
			 interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_RESVD,
			 interrupt_controller_hal_get_level(x),
			 interrupt_controller_hal_get_type(x) == INTTP_LEVEL ? "LEVEL" : "EDGE",
			 intr_has_handler(x, cpu));

		if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			continue;
		}

		if (flags & ESP_INTR_FLAG_SHARED) {
			/* We're allocating a shared int. */

			/* See if int already is used as a shared interrupt. */
			if (vd->flags & VECDESC_FL_SHARED) {
				/*
				 * We can use this already-marked-as-shared interrupt. Count the
				 * already attached isrs in order to see how useful it is.
				 */
				int no = 0;
				struct shared_vector_desc_t *svdesc = vd->shared_vec_info;

				while (svdesc != NULL) {
					no++;
					svdesc = svdesc->next;
				}
				if (no < best_shared_ct ||
					best_level > interrupt_controller_hal_get_level(x)) {
					/*
					 * Seems like this shared vector is both okay and has
					 * the least amount of ISRs already attached to it.
					 */
					best = x;
					best_shared_ct = no;
					best_level = interrupt_controller_hal_get_level(x);
					INTC_LOG("...int %d more usable as a shared int: "
						 "has %d existing vectors", x, no);
				} else {
					INTC_LOG("...worse than int %d", best);
				}
			} else {
				if (best == -1) {
					/*
					 * We haven't found a feasible shared interrupt yet.
					 * This one is still free and usable, even if not
					 * marked as shared.
					 * Remember it in case we don't find any other shared
					 * interrupt that qualifies.
					 */
					if (best_level > interrupt_controller_hal_get_level(x)) {
						best = x;
						best_level = interrupt_controller_hal_get_level(x);
						INTC_LOG("...int %d usable as new shared int", x);
					}
				} else {
					INTC_LOG("...already have a shared int");
				}
			}
		} else {
			/*
			 * This interrupt is feasible and not shared. Remember it if it
			 * has the lowest level seen so far.
			 */
			if (best_level > interrupt_controller_hal_get_level(x)) {
				best = x;
				best_level = interrupt_controller_hal_get_level(x);
			} else {
				INTC_LOG("...worse than int %d", best);
			}
		}
	}
	INTC_LOG("%s: using int %d", __func__, best);

	/*
	 * By now we have looked at all potential interrupts and
	 * hopefully have selected the best one in best.
	 */
	return best;
}

/* Common shared isr handler. Chain-call all ISRs. */
static void IRAM_ATTR shared_intr_isr(void *arg)
{
	struct vector_desc_t *vd = (struct vector_desc_t *)arg;
	struct shared_vector_desc_t *sh_vec = vd->shared_vec_info;

	esp_intr_lock();
	while (sh_vec) {
		if (!sh_vec->disabled) {
			if (!(sh_vec->statusreg) || (*sh_vec->statusreg & sh_vec->statusmask)) {
				sh_vec->isr(sh_vec->arg);
			}
		}
		sh_vec = sh_vec->next;
	}
	esp_intr_unlock();
}
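
/*
 * The statusreg/statusmask pair allows the dispatch loop above to skip
 * handlers whose device did not actually raise the line. A hypothetical
 * registration sketch (the register address and bit are made up):
 *
 *   sv->statusreg  = (uint32_t *)0x3ff40008;   // device's int status register
 *   sv->statusmask = BIT(3);                   // set when this device fired
 *
 * A NULL statusreg makes the handler run unconditionally on every shared
 * interrupt, as the check above shows.
 */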

int esp_intr_alloc_intrstatus(int source,
			      int flags,
			      uint32_t intrstatusreg,
			      uint32_t intrstatusmask,
			      intr_handler_t handler,
			      void *arg,
			      struct intr_handle_data_t **ret_handle)
{
	struct intr_handle_data_t *ret = NULL;
	int force = -1;

	INTC_LOG("%s (cpu %d): checking args", __func__, esp_core_id());
	/* Shared interrupts should be level-triggered. */
	if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
		return -EINVAL;
	}
	/* You can't set a handler / arg for a non-C-callable interrupt. */
	if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
		return -EINVAL;
	}
	/* Shared ints should have a handler and a non-processor-local source */
	if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
		return -EINVAL;
	}
	/* Statusreg should have a mask */
	if (intrstatusreg && !intrstatusmask) {
		return -EINVAL;
	}
	/*
	 * If the ISR is marked to be IRAM-resident, the handler must not be in the cached region.
	 * If we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
	 * we need to make sure the interrupt is connected to CPU0.
	 * CPU1 does not have access to the RTC fast memory through this region.
	 */
	if ((flags & ESP_INTR_FLAG_IRAM) &&
	    (ptrdiff_t) handler >= SOC_RTC_IRAM_HIGH &&
	    (ptrdiff_t) handler < SOC_RTC_DATA_LOW) {
		return -EINVAL;
	}

	/*
	 * Default to prio 1 for shared interrupts.
	 * Default to prio 1, 2 or 3 for non-shared interrupts.
	 */
	if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			flags |= ESP_INTR_FLAG_LEVEL1;
		} else {
			flags |= ESP_INTR_FLAG_LOWMED;
		}
	}
	INTC_LOG("%s (cpu %d): Args okay. "
		"Resulting flags 0x%X", __func__, esp_core_id(), flags);

	/*
	 * Check 'special' interrupt sources. These are tied to one specific
	 * interrupt, so we have to force get_available_int to only look at that.
	 */
	switch (source) {
	case ETS_INTERNAL_TIMER0_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER0_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER1_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER1_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER2_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER2_INTR_NO;
		break;
	case ETS_INTERNAL_SW0_INTR_SOURCE:
		force = ETS_INTERNAL_SW0_INTR_NO;
		break;
	case ETS_INTERNAL_SW1_INTR_SOURCE:
		force = ETS_INTERNAL_SW1_INTR_NO;
		break;
	case ETS_INTERNAL_PROFILING_INTR_SOURCE:
		force = ETS_INTERNAL_PROFILING_INTR_NO;
		break;
	default:
		break;
	}

	/* Allocate a return handle. If we end up not needing it, we'll free it later on. */
	ret = k_malloc(sizeof(struct intr_handle_data_t));
	if (ret == NULL) {
		return -ENOMEM;
	}

	esp_intr_lock();
	int cpu = esp_core_id();
	/* See if we can find an interrupt that matches the flags. */
	int intr = get_available_int(flags, cpu, force, source);

	if (intr == -1) {
		/* None found. Bail out. */
		esp_intr_unlock();
		k_free(ret);
		return -ENODEV;
	}
	/* Get an int vector desc for int. */
	struct vector_desc_t *vd = get_desc_for_int(intr, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		k_free(ret);
		return -ENOMEM;
	}

	/* Allocate that int! */
	if (flags & ESP_INTR_FLAG_SHARED) {
		/* Populate vector entry and add to linked list. */
		struct shared_vector_desc_t *sv = k_malloc(sizeof(struct shared_vector_desc_t));

		if (sv == NULL) {
			esp_intr_unlock();
			k_free(ret);
			return -ENOMEM;
		}
		memset(sv, 0, sizeof(struct shared_vector_desc_t));
		sv->statusreg = (uint32_t *)intrstatusreg;
		sv->statusmask = intrstatusmask;
		sv->isr = handler;
		sv->arg = arg;
		sv->next = vd->shared_vec_info;
		sv->source = source;
		sv->disabled = 0;
		vd->shared_vec_info = sv;
		vd->flags |= VECDESC_FL_SHARED;
		/* (Re-)set shared isr handler to new value. */
		set_interrupt_handler(intr, shared_intr_isr, vd);
	} else {
		/* Mark as unusable for other interrupt sources. This is ours now! */
		vd->flags = VECDESC_FL_NONSHARED;
		if (handler) {
			set_interrupt_handler(intr, handler, arg);
		}
		if (flags & ESP_INTR_FLAG_EDGE) {
			xthal_set_intclear(1 << intr);
		}
		vd->source = source;
	}
	if (flags & ESP_INTR_FLAG_IRAM) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] &= ~(1 << intr);
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] |= (1 << intr);
	}
	if (source >= 0) {
		intr_matrix_set(cpu, source, intr);
	}

	/* Fill return handle data. */
	ret->vector_desc = vd;
	ret->shared_vector_desc = vd->shared_vec_info;

	/* Enable int at CPU-level. */
	irq_enable(intr);

	/*
	 * If interrupt has to be started disabled, do that now; ints won't be enabled for
	 * real until the end of the critical section.
	 */
	if (flags & ESP_INTR_FLAG_INTRDISABLED) {
		esp_intr_disable(ret);
	}

	esp_intr_unlock();

	/* Fill return handle if needed, otherwise free handle. */
	if (ret_handle != NULL) {
		*ret_handle = ret;
	} else {
		k_free(ret);
	}

	LOG_DBG("Connected src %d to int %d (cpu %d)", source, intr, cpu);
	return 0;
}
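
/*
 * Usage sketch for a shared, status-gated allocation (the source, register
 * address and mask are illustrative, not prescriptive):
 *
 *   struct intr_handle_data_t *handle;
 *   int err = esp_intr_alloc_intrstatus(ETS_GPIO_INTR_SOURCE,
 *                                       ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LEVEL1,
 *                                       (uint32_t)&my_dev_int_status, BIT(0),
 *                                       my_isr, my_dev, &handle);
 *
 * On success, handle can later be passed to esp_intr_disable(),
 * esp_intr_enable() and esp_intr_free().
 */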

int esp_intr_alloc(int source,
		int flags,
		intr_handler_t handler,
		void *arg,
		struct intr_handle_data_t **ret_handle)
{
	/*
	 * As an optimization, we can create a table with the possible interrupt status
	 * registers and masks for every single source there is. We can then add code here to
	 * look up an applicable value and pass that to the esp_intr_alloc_intrstatus function.
	 */
	return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
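
/*
 * Usage sketch for a plain non-shared allocation (source and flags are
 * illustrative): request any free low/medium-level interrupt and keep it
 * servable while the flash cache is disabled:
 *
 *   struct intr_handle_data_t *uart_handle;
 *   int err = esp_intr_alloc(ETS_UART0_INTR_SOURCE, ESP_INTR_FLAG_IRAM,
 *                            uart_isr, uart_dev, &uart_handle);
 */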

int IRAM_ATTR esp_intr_set_in_iram(struct intr_handle_data_t *handle, bool is_in_iram)
{
	if (!handle) {
		return -EINVAL;
	}
	struct vector_desc_t *vd = handle->vector_desc;

	if (vd->flags & VECDESC_FL_SHARED) {
		return -EINVAL;
	}
	esp_intr_lock();
	uint32_t mask = (1 << vd->intno);

	if (is_in_iram) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] &= ~mask;
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] |= mask;
	}
	esp_intr_unlock();
	return 0;
}

int esp_intr_free(struct intr_handle_data_t *handle)
{
	bool free_shared_vector = false;

	if (!handle) {
		return -EINVAL;
	}

	esp_intr_lock();
	esp_intr_disable(handle);
	if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
		/* Find and kill the shared int */
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		struct shared_vector_desc_t *prevsvd = NULL;

		assert(svd); /* should be something in there for a shared int */
		while (svd != NULL) {
			if (svd == handle->shared_vector_desc) {
				/* Found it. Now kill it. */
				if (prevsvd) {
					prevsvd->next = svd->next;
				} else {
					handle->vector_desc->shared_vec_info = svd->next;
				}
				k_free(svd);
				break;
			}
			prevsvd = svd;
			svd = svd->next;
		}
		/* If nothing left, disable interrupt. */
		if (handle->vector_desc->shared_vec_info == NULL) {
			free_shared_vector = true;
		}
		INTC_LOG("%s: Deleting shared int: %s. "
			"Shared int is %s", __func__, svd ? "deleted" : "not found",
			free_shared_vector ? "empty now." : "still in use");
	}

	if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
		INTC_LOG("%s: Disabling int, killing handler", __func__);
		/* Reset to normal handler */
		set_interrupt_handler(handle->vector_desc->intno,
				      default_intr_handler,
				      (void *)((int)handle->vector_desc->intno));
		/*
		 * Theoretically, we could free the vector_desc... not sure if that's worth the
		 * few bytes of memory we save. (We can also not use the same exit path for empty
		 * shared ints anymore if we delete the desc.) For now, just mark it as free.
		 */
		handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED | VECDESC_FL_RESERVED);
		/* Also kill non_iram mask bit. */
		non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
	}
	esp_intr_unlock();
	k_free(handle);
	return 0;
}

int esp_intr_get_intno(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->intno;
}

int esp_intr_get_cpu(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->cpu;
}

/**
 * Interrupt disabling strategy:
 * If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a
 * non-connected interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we
 * disable it through the per-cpu interrupt enable register instead.
 * This allows us, for the muxed interrupts, to disable an int from
 * the other core. It also allows disabling shared interrupts.
 */

/*
 * Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29
 * causes the interrupt to effectively be disabled.
 */
#define INT_MUX_DISABLED_INTNO 6

int IRAM_ATTR esp_intr_enable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;

	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 0;
		source = handle->shared_vector_desc->source;
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		/* Disabled using int matrix; re-connect to enable */
		intr_matrix_set(handle->vector_desc->cpu, source, handle->vector_desc->intno);
	} else {
		/* Re-enable using cpu int ena reg */
		if (handle->vector_desc->cpu != esp_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only enable these ints on this cpu */
		}
		irq_enable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}

int IRAM_ATTR esp_intr_disable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;
	bool disabled = true;

	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 1;
		source = handle->shared_vector_desc->source;

		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;

		assert(svd != NULL);
		while (svd) {
			if (svd->source == source && svd->disabled == 0) {
				disabled = false;
				break;
			}
			svd = svd->next;
		}
	} else {
		source = handle->vector_desc->source;
	}

	if (source >= 0) {
		if (disabled) {
			/* Disable using int matrix */
			intr_matrix_set(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
		}
	} else {
		/* Disable using per-cpu regs */
		if (handle->vector_desc->cpu != esp_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only disable these ints on this cpu */
		}
		irq_disable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}

void IRAM_ATTR esp_intr_noniram_disable(void)
{
	int oldint;
	int cpu = esp_core_id();
	int non_iram_ints = ~non_iram_int_mask[cpu];

	if (non_iram_int_disabled_flag[cpu]) {
		abort();
	}
	non_iram_int_disabled_flag[cpu] = true;
	oldint = interrupt_controller_hal_read_interrupt_mask();
	interrupt_controller_hal_disable_interrupts(non_iram_ints);
	/* Save which ints we did disable */
	non_iram_int_disabled[cpu] = oldint & non_iram_ints;
}

void IRAM_ATTR esp_intr_noniram_enable(void)
{
	int cpu = esp_core_id();
	int non_iram_ints = non_iram_int_disabled[cpu];

	if (!non_iram_int_disabled_flag[cpu]) {
		abort();
	}
	non_iram_int_disabled_flag[cpu] = false;
	interrupt_controller_hal_enable_interrupts(non_iram_ints);
}
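
/*
 * Typical usage sketch: bracket an operation that disables the flash cache
 * so that only IRAM-resident interrupts can fire in between:
 *
 *   esp_intr_noniram_disable();
 *   ... code that runs with the flash cache unavailable ...
 *   esp_intr_noniram_enable();
 *
 * The calls must be balanced and made on the same CPU; a second disable
 * without an intervening enable aborts, as the flag check above shows.
 */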