/*
 * Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <soc.h>
#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#include <esp_memory_utils.h>
#include <esp_attr.h>
#include <esp_cpu.h>
#include <esp_private/rtc_ctrl.h>
#include <limits.h>
#include <assert.h>
#include <soc/soc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(esp32_intc, CONFIG_LOG_DEFAULT_LEVEL);

#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11

#define VECDESC_FL_RESERVED     (1 << 0)
#define VECDESC_FL_INIRAM       (1 << 1)
#define VECDESC_FL_SHARED       (1 << 2)
#define VECDESC_FL_NONSHARED    (1 << 3)

/*
 * Define this to debug the choices made when allocating an interrupt. It produces a lot of
 * debugging output within a critical region, which can cause weird side effects (e.g. the
 * interrupt watchdog being triggered); that is why it is kept separate from the normal LOG*
 * scheme.
 */
#ifdef CONFIG_INTC_ESP32_DECISIONS_LOG
# define INTC_LOG(...) LOG_INF(__VA_ARGS__)
#else
# define INTC_LOG(...) do {} while (false)
#endif
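
/*
 * Usage sketch (assuming a standard Zephyr Kconfig setup with logging enabled):
 * the decision log can be switched on from an application's prj.conf:
 *
 *   CONFIG_LOG=y
 *   CONFIG_INTC_ESP32_DECISIONS_LOG=y
 */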

/* Typedef for C-callable interrupt handler function */
typedef void (*intc_handler_t)(void *);
typedef void (*intc_dyn_handler_t)(const void *);

/* shared critical section context */
static int esp_intc_csec;

static inline void esp_intr_lock(void)
{
	esp_intc_csec = irq_lock();
}

static inline void esp_intr_unlock(void)
{
	irq_unlock(esp_intc_csec);
}


/*
 * Interrupt handler table and unhandled interrupt routine. Duplicated
 * from xtensa_intr.c... it's supposed to be private, but we need to look
 * into it in order to see if someone allocated an int using
 * set_interrupt_handler.
 */
struct intr_alloc_table_entry {
	void (*handler)(void *arg);
	void *arg;
};

/* Default handler for unhandled interrupts. */
void IRAM_ATTR default_intr_handler(void *arg)
{
	esp_rom_printf("Unhandled interrupt %d on cpu %d!\n", (int)arg, esp_cpu_get_core_id());
}

static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_MAX_NUM_CPUS];

static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
{
	irq_disable(n);
	intr_alloc_table[n * CONFIG_MP_MAX_NUM_CPUS].handler = f;
	irq_connect_dynamic(n, 0, (intc_dyn_handler_t)f, arg, 0);
}

/* Linked list of vector descriptions, sorted by cpu.intno value */
static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */

/* This bitmask has a 1 if the int should be disabled when the flash is disabled. */
static uint32_t non_iram_int_mask[CONFIG_MP_MAX_NUM_CPUS];
/* This bitmask has a 1 if the int was disabled using esp_intr_noniram_disable. */
static uint32_t non_iram_int_disabled[CONFIG_MP_MAX_NUM_CPUS];
static bool non_iram_int_disabled_flag[CONFIG_MP_MAX_NUM_CPUS];
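
/*
 * Worked example of the bookkeeping above: if interrupt 5 on CPU 0 is allocated
 * without ESP_INTR_FLAG_IRAM, bit 5 of non_iram_int_mask[0] gets set. A later
 * esp_intr_noniram_disable() masks that interrupt and records its previous
 * enable state in non_iram_int_disabled[0], so esp_intr_noniram_enable() can
 * restore exactly the set of interrupts that was running before the flash
 * cache went away.
 */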

/*
 * Inserts an item into vector_desc list so that the list is sorted
 * with an incrementing cpu.intno value.
 */
static void insert_vector_desc(struct vector_desc_t *to_insert)
{
	struct vector_desc_t *vd = vector_desc_head;
	struct vector_desc_t *prev = NULL;

	while (vd != NULL) {
		if (vd->cpu > to_insert->cpu) {
			break;
		}
		if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) {
			break;
		}
		prev = vd;
		vd = vd->next;
	}
	if ((vector_desc_head == NULL) || (prev == NULL)) {
		/* First item */
		to_insert->next = vd;
		vector_desc_head = to_insert;
	} else {
		prev->next = to_insert;
		to_insert->next = vd;
	}
}

/* Returns a vector_desc entry for an intno/cpu, or NULL if none exists. */
static struct vector_desc_t *find_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = vector_desc_head;

	while (vd != NULL) {
		if (vd->cpu == cpu && vd->intno == intno) {
			break;
		}
		vd = vd->next;
	}
	return vd;
}

/*
 * Returns a vector_desc entry for an intno/cpu.
 * Either returns a preexisting one or allocates a new one and inserts
 * it into the list. Returns NULL on malloc fail.
 */
static struct vector_desc_t *get_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = find_desc_for_int(intno, cpu);

	if (vd == NULL) {
		struct vector_desc_t *newvd = k_malloc(sizeof(struct vector_desc_t));

		if (newvd == NULL) {
			return NULL;
		}
		memset(newvd, 0, sizeof(struct vector_desc_t));
		newvd->intno = intno;
		newvd->cpu = cpu;
		insert_vector_desc(newvd);
		return newvd;
	} else {
		return vd;
	}
}

/*
 * Returns a vector_desc entry for a source; the cpu parameter is used
 * to tell apart the GPIO_INT and GPIO_NMI sources of the different CPUs.
 */
static struct vector_desc_t *find_desc_for_source(int source, int cpu)
{
	struct vector_desc_t *vd = vector_desc_head;

	while (vd != NULL) {
		if (!(vd->flags & VECDESC_FL_SHARED)) {
			if (vd->source == source && cpu == vd->cpu) {
				break;
			}
		} else if (vd->cpu == cpu) {
			/* check only shared vds for the correct cpu, otherwise skip */
			bool found = false;
			struct shared_vector_desc_t *svd = vd->shared_vec_info;

			assert(svd != NULL);
			while (svd) {
				if (svd->source == source) {
					found = true;
					break;
				}
				svd = svd->next;
			}
			if (found) {
				break;
			}
		}
		vd = vd->next;
	}
	return vd;
}

void esp_intr_initialize(void)
{
	unsigned int num_cpus = arch_num_cpus();

	for (size_t i = 0; i < (ESP_INTC_INTS_NUM * num_cpus); ++i) {
		intr_alloc_table[i].handler = default_intr_handler;
		intr_alloc_table[i].arg = (void *)i;
	}
}


int esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
	if (intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_SHARED;
	if (is_int_ram) {
		vd->flags |= VECDESC_FL_INIRAM;
	}
	esp_intr_unlock();

	return 0;
}

int esp_intr_reserve(int intno, int cpu)
{
	if (intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu >= arch_num_cpus()) {
		return -EINVAL;
	}

	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_RESERVED;
	esp_intr_unlock();

	return 0;
}
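
/*
 * Usage sketch (hypothetical early platform-init code; the interrupt number is
 * illustrative): permanently take CPU interrupt line 25 on CPU 0 out of the
 * allocator's pool, so esp_intr_alloc() will never hand it out:
 *
 *   if (esp_intr_reserve(25, 0) != 0) {
 *       LOG_ERR("could not reserve interrupt line");
 *   }
 */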

/* Returns true if handler for interrupt is not the default unhandled interrupt handler */
static bool intr_has_handler(int intr, int cpu)
{
	bool r;

	r = intr_alloc_table[intr * CONFIG_MP_MAX_NUM_CPUS + cpu].handler != default_intr_handler;

	return r;
}

static bool is_vect_desc_usable(struct vector_desc_t *vd, int flags, int cpu, int force)
{
	/* Check if interrupt is not reserved by design */
	int x = vd->intno;
	esp_cpu_intr_desc_t intr_desc;

	esp_cpu_intr_get_desc(cpu, x, &intr_desc);

	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
		INTC_LOG("....Unusable: reserved");
		return false;
	}
	if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
		INTC_LOG("....Unusable: special-purpose int");
		return false;
	}

#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
	/* Check if the interrupt priority is acceptable */
	if (!(flags & (1 << intr_desc.priority))) {
		INTC_LOG("....Unusable: incompatible priority");
		return false;
	}
	/* check if edge/level type matches what we want */
	if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
		(((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
		INTC_LOG("....Unusable: incompatible trigger type");
		return false;
	}
#endif

	/* check if interrupt is reserved at runtime */
	if (vd->flags & VECDESC_FL_RESERVED) {
		INTC_LOG("....Unusable: reserved at runtime.");
		return false;
	}

	/* Ints can't be both shared and non-shared. */
	assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
	/* check if interrupt already is in use by a non-shared interrupt */
	if (vd->flags & VECDESC_FL_NONSHARED) {
		INTC_LOG("....Unusable: already in (non-shared) use.");
		return false;
	}
	/* check shared interrupt flags */
	if (vd->flags & VECDESC_FL_SHARED) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
			bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
			/*
			 * Bail out if int is shared, but iram property
			 * doesn't match what we want.
			 */
			if ((vd->flags & VECDESC_FL_SHARED) &&
				(desc_in_iram_flag != in_iram_flag)) {
				INTC_LOG("....Unusable: shared but iram prop doesn't match");
				return false;
			}
		} else {
			/*
			 * We need an unshared IRQ; can't use shared ones;
			 * bail out if this is shared.
			 */
			INTC_LOG("....Unusable: int is shared, we need non-shared.");
			return false;
		}
	} else if (intr_has_handler(x, cpu)) {
		/* Check if interrupt already is allocated by set_interrupt_handler */
		INTC_LOG("....Unusable: already allocated");
		return false;
	}

	return true;
}

/*
 * Locate a free interrupt compatible with the flags given.
 * The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
 * When an interrupt is forced, interrupts marked ESP_CPU_INTR_DESC_FLAG_SPECIAL
 * are also accepted.
 */
static int get_available_int(int flags, int cpu, int force, int source)
{
	int x;
	int best = -1;
	int best_level = 9;
	int best_shared_ct = INT_MAX;
	/* Default vector desc, for vectors not in the linked list */
	struct vector_desc_t empty_vect_desc;

	memset(&empty_vect_desc, 0, sizeof(struct vector_desc_t));

	/* Level defaults to any low/med interrupt */
	if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
		flags |= ESP_INTR_FLAG_LOWMED;
	}

	INTC_LOG("%s: try to find existing. Cpu: %d, Source: %d", __func__, cpu, source);
	struct vector_desc_t *vd = find_desc_for_source(source, cpu);

	if (vd) {
		/* if existing vd found, don't need to search any more. */
		INTC_LOG("%s: existing vd found. intno: %d", __func__, vd->intno);
		if (force != -1 && force != vd->intno) {
			INTC_LOG("%s: intr forced but does not match existing. "
				 "existing intno: %d, force: %d", __func__, vd->intno, force);
		} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			INTC_LOG("%s: existing vd invalid.", __func__);
		} else {
			best = vd->intno;
		}
		return best;
	}
	if (force != -1) {
		INTC_LOG("%s: try to find force. "
			 "Cpu: %d, Source: %d, Force: %d", __func__, cpu, source, force);
		/* if force assigned, don't need to search any more. */
		vd = find_desc_for_int(force, cpu);
		if (vd == NULL) {
			/* if existing vd not found, just check the default state for the intr. */
			empty_vect_desc.intno = force;
			vd = &empty_vect_desc;
		}
		if (is_vect_desc_usable(vd, flags, cpu, force)) {
			best = vd->intno;
		} else {
			INTC_LOG("%s: forced vd invalid.", __func__);
		}
		return best;
	}

	INTC_LOG("%s: start looking. Current cpu: %d", __func__, cpu);
	/* Neither an existing vd nor a forced intr; iterate over the 32 possible interrupts */
	for (x = 0; x < ESP_INTC_INTS_NUM; x++) {
		/* Grab the vector_desc for this vector. */
		vd = find_desc_for_int(x, cpu);
		if (vd == NULL) {
			empty_vect_desc.intno = x;
			vd = &empty_vect_desc;
		}

		esp_cpu_intr_desc_t intr_desc;

		esp_cpu_intr_get_desc(cpu, x, &intr_desc);

		INTC_LOG("Int %d reserved %d level %d %s hasIsr %d",
			 x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD,
			 intr_desc.priority,
			 intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL ? "LEVEL" : "EDGE",
			 intr_has_handler(x, cpu));

		if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			continue;
		}

		if (flags & ESP_INTR_FLAG_SHARED) {
			/* We're allocating a shared int. */

			/* See if int already is used as a shared interrupt. */
			if (vd->flags & VECDESC_FL_SHARED) {
				/*
				 * We can use this already-marked-as-shared interrupt. Count the
				 * already attached isrs in order to see how useful it is.
				 */
				int no = 0;
				struct shared_vector_desc_t *svdesc = vd->shared_vec_info;

				while (svdesc != NULL) {
					no++;
					svdesc = svdesc->next;
				}
				if (no < best_shared_ct || best_level > intr_desc.priority) {
					/*
					 * Seems like this shared vector is both okay and has
					 * the least amount of ISRs already attached to it.
					 */
					best = x;
					best_shared_ct = no;
					best_level = intr_desc.priority;
					INTC_LOG("...int %d more usable as a shared int: "
						 "has %d existing vectors", x, no);
				} else {
					INTC_LOG("...worse than int %d", best);
				}
			} else {
				if (best == -1) {
					/*
					 * We haven't found a feasible shared interrupt yet.
					 * This one is still free and usable, even if not
					 * marked as shared.
					 * Remember it in case we don't find any other shared
					 * interrupt that qualifies.
					 */
					if (best_level > intr_desc.priority) {
						best = x;
						best_level = intr_desc.priority;
						INTC_LOG("...int %d usable as new shared int", x);
					}
				} else {
					INTC_LOG("...already have a shared int");
				}
			}
		} else {
			/*
			 * This interrupt is feasible. Keep it as the candidate if it has a
			 * better (lower) priority level than the current best; we still
			 * finish the loop, since a later interrupt may be preferable.
			 */
			if (best_level > intr_desc.priority) {
				best = x;
				best_level = intr_desc.priority;
			} else {
				INTC_LOG("...worse than int %d", best);
			}
		}
	}
	INTC_LOG("%s: using int %d", __func__, best);

	/*
	 * By now we have looked at all potential interrupts and
	 * hopefully have selected the best one in best.
	 */
	return best;
}

/* Common shared isr handler. Chain-call all ISRs. */
static void IRAM_ATTR shared_intr_isr(void *arg)
{
	struct vector_desc_t *vd = (struct vector_desc_t *)arg;
	struct shared_vector_desc_t *sh_vec = vd->shared_vec_info;

	esp_intr_lock();
	while (sh_vec) {
		if (!sh_vec->disabled) {
			if ((sh_vec->statusreg == NULL) ||
				(*sh_vec->statusreg & sh_vec->statusmask)) {
				sh_vec->isr(sh_vec->arg);
			}
		}
		sh_vec = sh_vec->next;
	}
	esp_intr_unlock();
}
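
/*
 * Note on the loop above: a shared vector's ISR is invoked only when either no
 * status register was supplied at allocation time (statusreg == NULL) or the
 * masked status register reads non-zero, so handlers whose peripheral did not
 * actually assert the line are skipped.
 */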

int esp_intr_alloc_intrstatus(int source,
			      int flags,
			      uint32_t intrstatusreg,
			      uint32_t intrstatusmask,
			      intr_handler_t handler,
			      void *arg,
			      struct intr_handle_data_t **ret_handle)
{
	struct intr_handle_data_t *ret = NULL;
	int force = -1;

	INTC_LOG("%s (cpu %d): checking args", __func__, esp_cpu_get_core_id());
	/* Shared interrupts should be level-triggered. */
	if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
		return -EINVAL;
	}
	/* You can't set a handler / arg for a non-C-callable interrupt. */
	if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
		return -EINVAL;
	}
	/* Shared ints should have a handler and a non-processor-local source */
	if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
		return -EINVAL;
	}
	/* Statusreg should have a mask */
	if (intrstatusreg && !intrstatusmask) {
		return -EINVAL;
	}
	/*
	 * If the ISR is marked to be IRAM-resident, the handler must not be in the cached region.
	 * If we are to allow placing interrupt handlers into the 0x400c0000..0x400c2000 region,
	 * we need to make sure the interrupt is connected to CPU0.
	 * CPU1 does not have access to the RTC fast memory through this region.
	 */
	if ((flags & ESP_INTR_FLAG_IRAM) && handler && !esp_ptr_in_iram(handler) &&
		!esp_ptr_in_rtc_iram_fast(handler)) {
		return -EINVAL;
	}

	/*
	 * Default to prio 1 for shared interrupts.
	 * Default to prio 1, 2 or 3 for non-shared interrupts.
	 */
	if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			flags |= ESP_INTR_FLAG_LEVEL1;
		} else {
			flags |= ESP_INTR_FLAG_LOWMED;
		}
	}
	INTC_LOG("%s (cpu %d): Args okay. "
		"Resulting flags 0x%X", __func__, esp_cpu_get_core_id(), flags);

	/*
	 * Check 'special' interrupt sources. These are tied to one specific
	 * interrupt, so we have to force get_available_int to only look at that.
	 */
	switch (source) {
	case ETS_INTERNAL_TIMER0_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER0_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER1_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER1_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER2_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER2_INTR_NO;
		break;
	case ETS_INTERNAL_SW0_INTR_SOURCE:
		force = ETS_INTERNAL_SW0_INTR_NO;
		break;
	case ETS_INTERNAL_SW1_INTR_SOURCE:
		force = ETS_INTERNAL_SW1_INTR_NO;
		break;
	case ETS_INTERNAL_PROFILING_INTR_SOURCE:
		force = ETS_INTERNAL_PROFILING_INTR_NO;
		break;
	default:
		break;
	}

	/* Allocate a return handle. If we end up not needing it, we'll free it later on. */
	ret = k_malloc(sizeof(struct intr_handle_data_t));
	if (ret == NULL) {
		return -ENOMEM;
	}

	esp_intr_lock();
	int cpu = esp_cpu_get_core_id();
	/* See if we can find an interrupt that matches the flags. */
	int intr = get_available_int(flags, cpu, force, source);

	if (intr == -1) {
		/* None found. Bail out. */
		esp_intr_unlock();
		k_free(ret);
		return -ENODEV;
	}
	/* Get an int vector desc for int. */
	struct vector_desc_t *vd = get_desc_for_int(intr, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		k_free(ret);
		return -ENOMEM;
	}

	/* Allocate that int! */
	if (flags & ESP_INTR_FLAG_SHARED) {
		/* Populate vector entry and add to linked list. */
		struct shared_vector_desc_t *sv = k_malloc(sizeof(struct shared_vector_desc_t));

		if (sv == NULL) {
			esp_intr_unlock();
			k_free(ret);
			return -ENOMEM;
		}
		memset(sv, 0, sizeof(struct shared_vector_desc_t));
		sv->statusreg = (uint32_t *)intrstatusreg;
		sv->statusmask = intrstatusmask;
		sv->isr = handler;
		sv->arg = arg;
		sv->next = vd->shared_vec_info;
		sv->source = source;
		sv->disabled = 0;
		vd->shared_vec_info = sv;
		vd->flags |= VECDESC_FL_SHARED;
		/* (Re-)set shared isr handler to new value. */
		set_interrupt_handler(intr, shared_intr_isr, vd);
	} else {
		/* Mark as unusable for other interrupt sources. This is ours now! */
		vd->flags = VECDESC_FL_NONSHARED;
		if (handler) {
			set_interrupt_handler(intr, handler, arg);
		}
		if (flags & ESP_INTR_FLAG_EDGE) {
			xthal_set_intclear(1 << intr);
		}
		vd->source = source;
	}
	if (flags & ESP_INTR_FLAG_IRAM) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] &= ~(1 << intr);
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] |= (1 << intr);
	}
	if (source >= 0) {
		esp_rom_route_intr_matrix(cpu, source, intr);
	}

	/* Fill return handle data. */
	ret->vector_desc = vd;
	ret->shared_vector_desc = vd->shared_vec_info;

	/* Enable int at CPU-level. */
	irq_enable(intr);

	/*
	 * If interrupt has to be started disabled, do that now; ints won't be enabled for
	 * real until the end of the critical section.
	 */
	if (flags & ESP_INTR_FLAG_INTRDISABLED) {
		esp_intr_disable(ret);
	}

#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
	/* Extract the level from the interrupt passed flags */
	int level = esp_intr_flags_to_level(flags);

	esp_cpu_intr_set_priority(intr, level);

	if (flags & ESP_INTR_FLAG_EDGE) {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
	} else {
		esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
	}
#endif

	esp_intr_unlock();

	/* Fill return handle if needed, otherwise free handle. */
	if (ret_handle != NULL) {
		*ret_handle = ret;
	} else {
		k_free(ret);
	}

	LOG_DBG("Connected src %d to int %d (cpu %d)", source, intr, cpu);
	return 0;
}

int esp_intr_alloc(int source,
		int flags,
		intr_handler_t handler,
		void *arg,
		struct intr_handle_data_t **ret_handle)
{
	/*
	 * As an optimization, we can create a table with the possible interrupt status
	 * registers and masks for every single source there is. We can then add code here to
	 * look up an applicable value and pass that to the esp_intr_alloc_intrstatus function.
	 */
	return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
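
/*
 * Usage sketch (hypothetical driver code; the source macro and handler name are
 * illustrative, not part of this file): allocate a level-triggered, IRAM-safe
 * interrupt for a peripheral source and keep the handle for later control. Note
 * that with ESP_INTR_FLAG_IRAM the handler itself must live in IRAM, hence the
 * IRAM_ATTR (see the esp_ptr_in_iram() check above):
 *
 *   static void IRAM_ATTR my_isr(void *arg)
 *   {
 *       // acknowledge the peripheral here
 *   }
 *
 *   struct intr_handle_data_t *handle;
 *   int err = esp_intr_alloc(ETS_UART0_INTR_SOURCE,
 *                            ESP_INTR_FLAG_IRAM | ESP_INTR_FLAG_LEVEL1,
 *                            my_isr, NULL, &handle);
 *   if (err == 0) {
 *       // ...later: esp_intr_disable(handle) / esp_intr_enable(handle)
 *   }
 */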

int IRAM_ATTR esp_intr_set_in_iram(struct intr_handle_data_t *handle, bool is_in_iram)
{
	if (!handle) {
		return -EINVAL;
	}
	struct vector_desc_t *vd = handle->vector_desc;

	if (vd->flags & VECDESC_FL_SHARED) {
		return -EINVAL;
	}
	esp_intr_lock();
	uint32_t mask = (1 << vd->intno);

	if (is_in_iram) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] &= ~mask;
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] |= mask;
	}
	esp_intr_unlock();
	return 0;
}

int esp_intr_free(struct intr_handle_data_t *handle)
{
	bool free_shared_vector = false;

	if (!handle) {
		return -EINVAL;
	}

	esp_intr_lock();
	esp_intr_disable(handle);
	if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
		/* Find and kill the shared int */
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		struct shared_vector_desc_t *prevsvd = NULL;

		assert(svd); /* should be something in there for a shared int */
		while (svd != NULL) {
			if (svd == handle->shared_vector_desc) {
				/* Found it. Now kill it. */
				if (prevsvd) {
					prevsvd->next = svd->next;
				} else {
					handle->vector_desc->shared_vec_info = svd->next;
				}
				k_free(svd);
				break;
			}
			prevsvd = svd;
			svd = svd->next;
		}
		/* If nothing left, disable interrupt. */
		if (handle->vector_desc->shared_vec_info == NULL) {
			free_shared_vector = true;
		}
		INTC_LOG("%s: Deleting shared int: %s. "
			"Shared int is %s", __func__, svd ? "deleted" : "not found or last one",
			free_shared_vector ? "empty now." : "still in use");
	}

	if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
		INTC_LOG("%s: Disabling int, killing handler", __func__);
		/* Reset to normal handler */
		set_interrupt_handler(handle->vector_desc->intno,
				      default_intr_handler,
				      (void *)((int)handle->vector_desc->intno));
		/*
		 * Theoretically, we could free the vector_desc... not sure if that's worth the
		 * few bytes of memory we save. (We also can't use the same exit path for empty
		 * shared ints anymore if we delete the desc.) For now, just mark it as free.
		 */
		handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED |
			VECDESC_FL_RESERVED | VECDESC_FL_SHARED);

		/* Also kill non_iram mask bit. */
		non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
	}
	esp_intr_unlock();
	k_free(handle);
	return 0;
}

int esp_intr_get_intno(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->intno;
}

int esp_intr_get_cpu(struct intr_handle_data_t *handle)
{
	return handle->vector_desc->cpu;
}

/**
 * Interrupt disabling strategy:
 * If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a
 * non-connected interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we
 * disable it through the CPU's interrupt enable register.
 * The muxing approach allows us to disable a muxed int from the other core,
 * and it also allows disabling shared interrupts.
 */

/*
 * Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29
 * causes the interrupt to effectively be disabled.
 */
#define INT_MUX_DISABLED_INTNO 6
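
/*
 * Note: the interrupt numbers that qualify here (6, 7, 11, 15, 16, 29) are the
 * CPU-internal interrupts defined at the top of this file as
 * ETS_INTERNAL_*_INTR_NO (internal timers, software interrupts and the
 * profiling interrupt); they are never driven by the interrupt matrix, so a
 * source routed there goes nowhere.
 */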

int IRAM_ATTR esp_intr_enable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;

	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 0;
		source = handle->shared_vector_desc->source;
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		/* Disabled using int matrix; re-connect to enable */
		esp_rom_route_intr_matrix(handle->vector_desc->cpu,
			source, handle->vector_desc->intno);
	} else {
		/* Re-enable using cpu int ena reg */
		if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only enable these ints on this cpu */
		}
		irq_enable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}

int IRAM_ATTR esp_intr_disable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;
	bool disabled = true;

	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 1;
		source = handle->shared_vector_desc->source;

		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;

		assert(svd != NULL);
		while (svd) {
			if (svd->source == source && svd->disabled == 0) {
				disabled = false;
				break;
			}
			svd = svd->next;
		}
	} else {
		source = handle->vector_desc->source;
	}

	if (source >= 0) {
		if (disabled) {
			/* Disable using int matrix */
			esp_rom_route_intr_matrix(handle->vector_desc->cpu,
				source, INT_MUX_DISABLED_INTNO);
		}
	} else {
		/* Disable using per-cpu regs */
		if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
			esp_intr_unlock();
			return -EINVAL; /* Can only disable these ints on this cpu */
		}
		irq_disable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}


void IRAM_ATTR esp_intr_noniram_disable(void)
{
	esp_intr_lock();
	int oldint;
	int cpu = esp_cpu_get_core_id();
	/* Bits set in non_iram_int_mask are exactly the ints to mask off here. */
	int non_iram_ints = non_iram_int_mask[cpu];

	if (non_iram_int_disabled_flag[cpu]) {
		abort();
	}
	non_iram_int_disabled_flag[cpu] = true;
	oldint = esp_cpu_intr_get_enabled_mask();
	esp_cpu_intr_disable(non_iram_ints);
	rtc_isr_noniram_disable(cpu);
	non_iram_int_disabled[cpu] = oldint & non_iram_ints;
	esp_intr_unlock();
}

void IRAM_ATTR esp_intr_noniram_enable(void)
{
	esp_intr_lock();
	int cpu = esp_cpu_get_core_id();
	int non_iram_ints = non_iram_int_disabled[cpu];

	if (!non_iram_int_disabled_flag[cpu]) {
		abort();
	}
	non_iram_int_disabled_flag[cpu] = false;
	esp_cpu_intr_enable(non_iram_ints);
	rtc_isr_noniram_enable(cpu);
	esp_intr_unlock();
}
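
/*
 * Usage sketch (illustrative; the flash-op function name is hypothetical):
 * the noniram pair must be used symmetrically around code that makes the
 * flash cache unavailable, and always on the same CPU:
 *
 *   esp_intr_noniram_disable();
 *   do_flash_operation();       // flash cache unavailable here
 *   esp_intr_noniram_enable();
 *
 * Calling disable twice, or enable without a prior disable, aborts by design
 * (see the non_iram_int_disabled_flag checks above).
 */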