/*
 * Copyright (c) 2019 Intel Corp.
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_

#include <devicetree.h>

/* Changing this value will require manual changes to exception and IDT setup
 * in locore.S for intel64
 */
#define Z_X86_OOPS_VECTOR	32

#if !defined(_ASMLANGUAGE)

#include <sys/sys_io.h>
#include <zephyr/types.h>
#include <stddef.h>
#include <stdbool.h>
#include <irq.h>
#include <arch/x86/mmustructs.h>
#include <arch/x86/thread_stack.h>
#include <linker/sections.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_PCIE_MSI

struct x86_msi_vector {
	unsigned int irq;
	uint8_t vector;
#ifdef CONFIG_INTEL_VTD_ICTL
	bool remap;
	uint8_t irte;
#endif
};

typedef struct x86_msi_vector arch_msi_vector_t;

#endif /* CONFIG_PCIE_MSI */

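/**
 * @brief re-enable interrupts if the IF flag was set in the saved key
 */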
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	if ((key & 0x00000200U) != 0U) { /* 'IF' bit */
		__asm__ volatile ("sti" ::: "memory");
	}
}

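/**
 * @brief write a byte to an x86 I/O port
 */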
static ALWAYS_INLINE void sys_out8(uint8_t data, io_port_t port)
{
	__asm__ volatile("outb %b0, %w1" :: "a"(data), "Nd"(port));
}

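/**
 * @brief read a byte from an x86 I/O port
 */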
static ALWAYS_INLINE uint8_t sys_in8(io_port_t port)
{
	uint8_t ret;

	__asm__ volatile("inb %w1, %b0" : "=a"(ret) : "Nd"(port));

	return ret;
}

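/**
 * @brief write a 16-bit word to an x86 I/O port
 */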
static ALWAYS_INLINE void sys_out16(uint16_t data, io_port_t port)
{
	__asm__ volatile("outw %w0, %w1" :: "a"(data), "Nd"(port));
}

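/**
 * @brief read a 16-bit word from an x86 I/O port
 */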
static ALWAYS_INLINE uint16_t sys_in16(io_port_t port)
{
	uint16_t ret;

	__asm__ volatile("inw %w1, %w0" : "=a"(ret) : "Nd"(port));

	return ret;
}

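/**
 * @brief write a 32-bit word to an x86 I/O port
 */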
static ALWAYS_INLINE void sys_out32(uint32_t data, io_port_t port)
{
	__asm__ volatile("outl %0, %w1" :: "a"(data), "Nd"(port));
}

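/**
 * @brief read a 32-bit word from an x86 I/O port
 */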
static ALWAYS_INLINE uint32_t sys_in32(io_port_t port)
{
	uint32_t ret;

	__asm__ volatile("inl %w1, %0" : "=a"(ret) : "Nd"(port));

	return ret;
}

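/**
 * @brief write a byte to a memory-mapped I/O register
 */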
static ALWAYS_INLINE void sys_write8(uint8_t data, mm_reg_t addr)
{
	__asm__ volatile("movb %0, %1"
			 :
			 : "q"(data), "m" (*(volatile uint8_t *)(uintptr_t) addr)
			 : "memory");
}

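/**
 * @brief read a byte from a memory-mapped I/O register
 */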
static ALWAYS_INLINE uint8_t sys_read8(mm_reg_t addr)
{
	uint8_t ret;

	__asm__ volatile("movb %1, %0"
			 : "=q"(ret)
			 : "m" (*(volatile uint8_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

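/**
 * @brief write a 16-bit word to a memory-mapped I/O register
 */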
static ALWAYS_INLINE void sys_write16(uint16_t data, mm_reg_t addr)
{
	__asm__ volatile("movw %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");
}

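/**
 * @brief read a 16-bit word from a memory-mapped I/O register
 */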
static ALWAYS_INLINE uint16_t sys_read16(mm_reg_t addr)
{
	uint16_t ret;

	__asm__ volatile("movw %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

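/**
 * @brief write a 32-bit word to a memory-mapped I/O register
 */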
static ALWAYS_INLINE void sys_write32(uint32_t data, mm_reg_t addr)
{
	__asm__ volatile("movl %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");
}

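/**
 * @brief read a 32-bit word from a memory-mapped I/O register
 */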
static ALWAYS_INLINE uint32_t sys_read32(mm_reg_t addr)
{
	uint32_t ret;

	__asm__ volatile("movl %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

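/**
 * @brief set the specified bit at the given memory address
 */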
static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btsl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit)
			 : "memory");
}

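/**
 * @brief clear the specified bit at the given memory address
 */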
static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btrl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));
}

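/**
 * @brief test the specified bit; returns non-zero if it is set
 */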
static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	__asm__ volatile("btl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

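/**
 * @brief set the specified bit, returning non-zero if it was previously set
 */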
static ALWAYS_INLINE int sys_test_and_set_bit(mem_addr_t addr,
					      unsigned int bit)
{
	int ret;

	__asm__ volatile("btsl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

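/**
 * @brief clear the specified bit, returning non-zero if it was previously set
 */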
static ALWAYS_INLINE int sys_test_and_clear_bit(mem_addr_t addr,
						unsigned int bit)
{
	int ret;

	__asm__ volatile("btrl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

#define sys_bitfield_set_bit sys_set_bit
#define sys_bitfield_clear_bit sys_clear_bit
#define sys_bitfield_test_bit sys_test_bit
#define sys_bitfield_test_and_set_bit sys_test_and_set_bit
#define sys_bitfield_test_and_clear_bit sys_test_and_clear_bit

/*
 * Map of IRQ numbers to their assigned vectors. On IA32, this is generated
 * at build time and defined via the linker script. On Intel64, it's an array.
 */

extern unsigned char _irq_to_interrupt_vector[];

#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) \
	((unsigned int) _irq_to_interrupt_vector[irq])


#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#include <drivers/interrupt_controller/sysapic.h>

#ifdef CONFIG_X86_64
#include <arch/x86/intel64/arch.h>
#else
#include <arch/x86/ia32/arch.h>
#endif

#include <arch/common/ffs.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);

extern uint32_t sys_clock_cycle_get_32(void);

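/**
 * @brief return the current 32-bit hardware cycle count
 */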
__pinned_func
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

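/**
 * @brief return true if the IF flag is set in the given interrupt lock key
 */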
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return (key & 0x200) != 0;
}

/**
 * @brief read timestamp register, 32-bits only, unserialized
 */

static ALWAYS_INLINE uint32_t z_do_read_cpu_timestamp32(void)
{
	uint32_t rv;

	__asm__ volatile("rdtsc" : "=a" (rv) : : "%edx");

	return rv;
}

/**
 * @brief read timestamp register ensuring serialization
 */

__pinned_func
static inline uint64_t z_tsc_read(void)
{
	union {
		struct {
			uint32_t lo;
			uint32_t hi;
		};
		uint64_t value;
	} rv;

#ifdef CONFIG_X86_64
	/*
	 * According to Intel 64 and IA-32 Architectures Software
	 * Developer’s Manual, volume 3, chapter 8.2.5, LFENCE provides
	 * a more efficient method of controlling memory ordering than
	 * the CPUID instruction. So use LFENCE here, as all 64-bit
	 * CPUs have LFENCE.
	 */
	__asm__ volatile ("lfence");
#else
	/* rdtsc & cpuid clobber eax, ebx, ecx and edx registers */
	__asm__ volatile (/* serialize */
		"xorl %%eax,%%eax;"
		"cpuid"
		:
		:
		: "%eax", "%ebx", "%ecx", "%edx"
		);
#endif

#ifdef CONFIG_X86_64
	/*
	 * We cannot use "=A", since this would use %rax on x86_64 and
	 * return only the lower 32 bits of the TSC
	 */
	__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));
#else
	/* "=A" means that value is in eax:edx pair. */
	__asm__ volatile ("rdtsc" : "=A" (rv.value));
#endif

	return rv.value;
}

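/**
 * @brief execute a single 'nop' instruction
 */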
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_ */