/*
 * Copyright (c) 2019 Intel Corp.
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_

#include <zephyr/devicetree.h>

/* Changing this value will require manual changes to exception and IDT setup
 * in locore.S for intel64
 */
#define Z_X86_OOPS_VECTOR 32

#if !defined(_ASMLANGUAGE)

#include <zephyr/sys/sys_io.h>
#include <zephyr/types.h>
#include <stddef.h>
#include <stdbool.h>
#include <zephyr/irq.h>
#include <zephyr/arch/x86/mmustructs.h>
#include <zephyr/arch/x86/thread_stack.h>
#include <zephyr/linker/sections.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_PCIE_MSI

struct x86_msi_vector {
	unsigned int irq;
	uint8_t vector;
#ifdef CONFIG_INTEL_VTD_ICTL
	bool remap;
	uint8_t irte;
#endif
};

typedef struct x86_msi_vector arch_msi_vector_t;

#endif /* CONFIG_PCIE_MSI */

static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	if ((key & 0x00000200U) != 0U) { /* 'IF' bit */
		__asm__ volatile ("sti" ::: "memory");
	}
}

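/*
 * Example (illustrative): the matching arch_irq_lock() is provided by the
 * IA32/Intel64 sub-arch headers included further down. A typical critical
 * section, assuming that API, looks like:
 *
 *	unsigned int key = arch_irq_lock();
 *	... code that must not be interrupted ...
 *	arch_irq_unlock(key);
 *
 * STI is only executed if the 'IF' flag was set in the key, i.e. if
 * interrupts were enabled when the lock was taken.
 */
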
static ALWAYS_INLINE void sys_out8(uint8_t data, io_port_t port)
{
	__asm__ volatile("outb %b0, %w1" :: "a"(data), "Nd"(port));
}

static ALWAYS_INLINE uint8_t sys_in8(io_port_t port)
{
	uint8_t ret;

	__asm__ volatile("inb %w1, %b0" : "=a"(ret) : "Nd"(port));

	return ret;
}

static ALWAYS_INLINE void sys_out16(uint16_t data, io_port_t port)
{
	__asm__ volatile("outw %w0, %w1" :: "a"(data), "Nd"(port));
}

static ALWAYS_INLINE uint16_t sys_in16(io_port_t port)
{
	uint16_t ret;

	__asm__ volatile("inw %w1, %w0" : "=a"(ret) : "Nd"(port));

	return ret;
}

static ALWAYS_INLINE void sys_out32(uint32_t data, io_port_t port)
{
	__asm__ volatile("outl %0, %w1" :: "a"(data), "Nd"(port));
}

static ALWAYS_INLINE uint32_t sys_in32(io_port_t port)
{
	uint32_t ret;

	__asm__ volatile("inl %w1, %0" : "=a"(ret) : "Nd"(port));

	return ret;
}

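/*
 * Example (illustrative): the sys_inN()/sys_outN() helpers above wrap the
 * x86 IN/OUT instructions for port-mapped I/O. For instance, reading a PCI
 * vendor/device ID through the legacy 0xCF8/0xCFC configuration mechanism
 * (address 0x80000000 selects bus 0, device 0, function 0, register 0):
 *
 *	sys_out32(0x80000000U, 0xCF8);
 *	uint32_t id = sys_in32(0xCFC);
 */
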
static ALWAYS_INLINE void sys_write8(uint8_t data, mm_reg_t addr)
{
	__asm__ volatile("movb %0, %1"
			 :
			 : "q"(data), "m" (*(volatile uint8_t *)(uintptr_t) addr)
			 : "memory");
}

static ALWAYS_INLINE uint8_t sys_read8(mm_reg_t addr)
{
	uint8_t ret;

	__asm__ volatile("movb %1, %0"
			 : "=q"(ret)
			 : "m" (*(volatile uint8_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE void sys_write16(uint16_t data, mm_reg_t addr)
{
	__asm__ volatile("movw %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");
}

static ALWAYS_INLINE uint16_t sys_read16(mm_reg_t addr)
{
	uint16_t ret;

	__asm__ volatile("movw %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint16_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

static ALWAYS_INLINE void sys_write32(uint32_t data, mm_reg_t addr)
{
	__asm__ volatile("movl %0, %1"
			 :
			 : "r"(data), "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");
}

static ALWAYS_INLINE uint32_t sys_read32(mm_reg_t addr)
{
	uint32_t ret;

	__asm__ volatile("movl %1, %0"
			 : "=r"(ret)
			 : "m" (*(volatile uint32_t *)(uintptr_t) addr)
			 : "memory");

	return ret;
}

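/*
 * Example (illustrative): the sys_readN()/sys_writeN() helpers compile to a
 * single forced MOV, so the compiler can neither tear nor elide an MMIO
 * access. Assuming a hypothetical, already-mapped register block at
 * HYPO_MMIO_BASE with a control register at offset 0x10:
 *
 *	uint32_t ctrl = sys_read32(HYPO_MMIO_BASE + 0x10);
 *	sys_write32(ctrl | (1U << 0), HYPO_MMIO_BASE + 0x10);
 */
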
static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btsl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit)
			 : "memory");
}

static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	__asm__ volatile("btrl %1, %0"
			 : "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit)
			 : "memory"); /* bit offset may reach past the byte operand */
}

static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	__asm__ volatile("btl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

static ALWAYS_INLINE int sys_test_and_set_bit(mem_addr_t addr,
					      unsigned int bit)
{
	int ret;

	__asm__ volatile("btsl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

static ALWAYS_INLINE int sys_test_and_clear_bit(mem_addr_t addr,
						unsigned int bit)
{
	int ret;

	__asm__ volatile("btrl %2, %1;"
			 "sbb %0, %0"
			 : "=r" (ret), "+m" (*(volatile uint8_t *) (addr))
			 : "Ir" (bit));

	return ret;
}

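/*
 * Example (illustrative): the test-and-modify helpers return the previous
 * bit value (0, or -1 via SBB) while changing it in a single instruction.
 * Note there is no LOCK prefix, so they are not SMP-atomic. With a
 * hypothetical flag word:
 *
 *	static uint32_t hypothetical_flags;
 *
 *	if (sys_test_and_set_bit((mem_addr_t)&hypothetical_flags, 3) == 0) {
 *		... bit 3 was clear; this caller claimed it first ...
 *	}
 */
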
#define sys_bitfield_set_bit sys_set_bit
#define sys_bitfield_clear_bit sys_clear_bit
#define sys_bitfield_test_bit sys_test_bit
#define sys_bitfield_test_and_set_bit sys_test_and_set_bit
#define sys_bitfield_test_and_clear_bit sys_test_and_clear_bit

/*
 * Map of IRQ numbers to their assigned vectors. On IA32, this is generated
 * at build time and defined via the linker script. On Intel64, it's an array.
 */

extern unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];

#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) \
	((unsigned int) _irq_to_interrupt_vector[(irq)])

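/*
 * Example (illustrative): interrupt controller code can translate a Zephyr
 * IRQ line number into the IDT vector the build assigned to it:
 *
 *	unsigned int vector = Z_IRQ_TO_INTERRUPT_VECTOR(5);
 */
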
#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#include <zephyr/drivers/interrupt_controller/sysapic.h>

#ifdef CONFIG_X86_64
#include <zephyr/arch/x86/intel64/arch.h>
#else
#include <zephyr/arch/x86/ia32/arch.h>
#endif

#include <zephyr/arch/common/ffs.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);

uint32_t sys_clock_cycle_get_32(void);

__pinned_func
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

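/*
 * Example (illustrative): measuring a short interval with the 32-bit cycle
 * counter; unsigned subtraction still yields the correct delta across a
 * single wraparound:
 *
 *	uint32_t start = arch_k_cycle_get_32();
 *	... work to be measured ...
 *	uint32_t cycles = arch_k_cycle_get_32() - start;
 */
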
uint64_t sys_clock_cycle_get_64(void);

__pinned_func
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return (key & 0x200) != 0;
}

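/*
 * Example (illustrative): given a key from arch_irq_lock(),
 * arch_irq_unlocked(key) is true if interrupts were enabled when the lock
 * was taken:
 *
 *	unsigned int key = arch_irq_lock();
 *	bool was_enabled = arch_irq_unlocked(key);
 *	arch_irq_unlock(key);
 */
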
/**
 * @brief Read the low 32 bits of the timestamp register, unserialized
 */

static ALWAYS_INLINE uint32_t z_do_read_cpu_timestamp32(void)
{
	uint32_t rv;

	__asm__ volatile("rdtsc" : "=a" (rv) : : "%edx");

	return rv;
}

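/*
 * Example (illustrative): RDTSC is not serialized here, so this helper
 * suits coarse deltas where out-of-order execution skew is acceptable:
 *
 *	uint32_t t0 = z_do_read_cpu_timestamp32();
 *	... code under rough measurement ...
 *	uint32_t dt = z_do_read_cpu_timestamp32() - t0;
 */
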
/**
 * @brief Read the 64-bit timestamp register, ensuring serialization
 */

__pinned_func
static inline uint64_t z_tsc_read(void)
{
	union {
		struct {
			uint32_t lo;
			uint32_t hi;
		};
		uint64_t value;
	} rv;

#ifdef CONFIG_X86_64
	/*
	 * According to Intel 64 and IA-32 Architectures Software
	 * Developer’s Manual, volume 3, chapter 8.2.5, LFENCE provides
	 * a more efficient method of controlling memory ordering than
	 * the CPUID instruction. So use LFENCE here, as all 64-bit
	 * CPUs have LFENCE.
	 */
	__asm__ volatile ("lfence");
#else
	/* cpuid clobbers the eax, ebx, ecx and edx registers */
	__asm__ volatile (/* serialize */
		"xorl %%eax,%%eax;"
		"cpuid"
		:
		:
		: "%eax", "%ebx", "%ecx", "%edx"
		);
#endif

#ifdef CONFIG_X86_64
	/*
	 * We cannot use "=A", since this would use %rax on x86_64 and
	 * return only the lower 32 bits of the TSC
	 */
	__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));
#else
	/* "=A" means that the value is in the eax:edx pair. */
	__asm__ volatile ("rdtsc" : "=A" (rv.value));
#endif

	return rv.value;
}

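/*
 * Example (illustrative): the LFENCE/CPUID serialization above keeps
 * earlier instructions from completing after RDTSC is sampled, so
 * z_tsc_read() is the better choice when timing small code sequences:
 *
 *	uint64_t t0 = z_tsc_read();
 *	... code under measurement ...
 *	uint64_t elapsed = z_tsc_read() - t0;
 */
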
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_ */