/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/cpu.h>
#include <kernel_arch_data.h>
#include <kernel_arch_func.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
#include <zephyr/irq.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/iterable_sections.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

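/* Map from IRQ line to the IDT vector it is connected to; entries are
 * filled in by z_x86_irq_connect_on_vector().
 */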
unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
#define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS)  /* # vectors free for IRQs */

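/* Per-vector handler and argument tables, indexed by (vector - IV_IRQS). */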
void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg);
const void *x86_irq_args[NR_IRQ_VECTORS];

#if defined(CONFIG_INTEL_VTD_ICTL)

#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/intel_vtd.h>

static const struct device *const vtd = DEVICE_DT_GET_ONE(intel_vt_d);

#endif /* CONFIG_INTEL_VTD_ICTL */

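/* Default handler installed on every dynamic vector: any interrupt that
 * arrives without a registered handler is treated as fatal.
 */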
static void irq_spurious(const void *arg)
{
	LOG_ERR("Spurious interrupt, vector %d\n", (uint32_t)(uint64_t)arg);
	z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}

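/* Point every dynamic IRQ vector at the spurious handler, recording the
 * vector number as the handler argument so it can be reported.
 */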
void x86_64_irq_init(void)
{
	for (int i = 0; i < NR_IRQ_VECTORS; i++) {
		x86_irq_funcs[i] = irq_spurious;
		x86_irq_args[i] = (const void *)(long)(i + IV_IRQS);
	}
}

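/* Find an unused IDT vector in the 16-vector block for the given priority
 * (capped at MAX_PRIORITY). If prev_vector is not -1, the search starts
 * there and skips that vector itself, so callers can obtain several
 * distinct vectors at the same priority. Vectors reserved for IRQ offload
 * and the oops exception are never returned. Returns -1 if the block is
 * exhausted.
 */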
int z_x86_allocate_vector(unsigned int priority, int prev_vector)
{
	const int VECTORS_PER_PRIORITY = 16;
	const int MAX_PRIORITY = 13;
	int vector = prev_vector;
	int i;

	if (priority >= MAX_PRIORITY) {
		priority = MAX_PRIORITY;
	}

	if (vector == -1) {
		vector = (priority * VECTORS_PER_PRIORITY) + IV_IRQS;
	}

	for (i = 0; i < VECTORS_PER_PRIORITY; ++i, ++vector) {
		if (prev_vector != -1 && vector == prev_vector) {
			continue;
		}

#ifdef CONFIG_IRQ_OFFLOAD
		if (vector == CONFIG_IRQ_OFFLOAD_VECTOR) {
			continue;
		}
#endif
		if (vector == Z_X86_OOPS_VECTOR) {
			continue;
		}

		if (x86_irq_funcs[vector - IV_IRQS] == irq_spurious) {
			return vector;
		}
	}

	return -1;
}

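/* Record the IRQ-to-vector mapping and install the handler and its
 * argument on the given vector.
 */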
void z_x86_irq_connect_on_vector(unsigned int irq,
				 uint8_t vector,
				 void (*func)(const void *arg),
				 const void *arg)
{
	_irq_to_interrupt_vector[irq] = vector;
	x86_irq_funcs[vector - IV_IRQS] = func;
	x86_irq_args[vector - IV_IRQS] = arg;
}

/*
 * N.B.: the API docs don't say anything about returning error values, but
 * this function returns -1 if a vector at the specified priority can't be
 * allocated. Whether it should simply __ASSERT instead is up for debate.
 */

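/* Allocate a vector for the IRQ at the requested priority, set up the
 * VT-d interrupt remapping entry when that controller is present and
 * ready, configure the interrupt controller routing, and install the
 * handler on the allocated vector.
 */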
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	uint32_t key;
	int vector;

	__ASSERT(irq < CONFIG_MAX_IRQ_LINES, "IRQ %u out of range", irq);

	key = irq_lock();

	vector = z_x86_allocate_vector(priority, -1);
	if (vector >= 0) {
#if defined(CONFIG_INTEL_VTD_ICTL)
		if (device_is_ready(vtd)) {
			int irte = vtd_allocate_entries(vtd, 1);

			__ASSERT(irte >= 0, "IRTE allocation must succeed");

			vtd_set_irte_vector(vtd, irte, vector);
			vtd_set_irte_irq(vtd, irte, irq);
		}
#endif /* CONFIG_INTEL_VTD_ICTL */

		z_irq_controller_irq_config(vector, irq, flags);
		z_x86_irq_connect_on_vector(irq, vector, routine, parameter);
	}

	irq_unlock(key);
	return vector;
}

/* The first bit is used to indicate whether the list of reserved interrupts
 * has been initialized based on content stored in the irq_alloc linker
 * section in ROM.
 */
#define IRQ_LIST_INITIALIZED 0

static ATOMIC_DEFINE(irq_reserved, CONFIG_MAX_IRQ_LINES);

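/* Mark as reserved every IRQ line recorded in the irq_alloc linker section,
 * so those lines are never handed out by arch_irq_allocate().
 */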
static void irq_init(void)
{
	TYPE_SECTION_FOREACH(const uint8_t, irq_alloc, irq) {
		__ASSERT_NO_MSG(*irq < CONFIG_MAX_IRQ_LINES);
		atomic_set_bit(irq_reserved, *irq);
	}
}

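/* Hand out the lowest-numbered IRQ line that is not already reserved,
 * initializing the reservation bitmap from ROM on first use. Returns
 * UINT_MAX if every line is taken. Note that bit IRQ_LIST_INITIALIZED
 * doubles as the "bitmap initialized" flag, so IRQ 0 is never allocated
 * dynamically.
 */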
unsigned int arch_irq_allocate(void)
{
	unsigned int key = irq_lock();
	int i;

	if (!atomic_test_and_set_bit(irq_reserved, IRQ_LIST_INITIALIZED)) {
		irq_init();
	}

	for (i = 0; i < ARRAY_SIZE(irq_reserved); i++) {
		unsigned int fz, irq;

		while ((fz = find_lsb_set(~atomic_get(&irq_reserved[i])))) {
			irq = (fz - 1) + (i * sizeof(atomic_val_t) * 8);
			if (irq >= CONFIG_MAX_IRQ_LINES) {
				break;
			}

			if (!atomic_test_and_set_bit(irq_reserved, irq)) {
				irq_unlock(key);
				return irq;
			}
		}
	}

	irq_unlock(key);

	return UINT_MAX;
}

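/* Mark an IRQ line as reserved so arch_irq_allocate() will not return it. */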
void arch_irq_set_used(unsigned int irq)
{
	unsigned int key = irq_lock();

	atomic_set_bit(irq_reserved, irq);

	irq_unlock(key);
}

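/* Report whether an IRQ line is currently reserved. */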
bool arch_irq_is_used(unsigned int irq)
{
	return atomic_test_bit(irq_reserved, irq);
}