/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic))

#include <zephyr/toolchain.h>
#include <zephyr/arch/x86/multiboot.h>
#include <zephyr/arch/x86/efi.h>
#include <zephyr/sys/util.h>
#include <zephyr/arch/x86/msr.h>
#include <kernel_arch_data.h>
#include <offsets_short.h>
#include <zephyr/drivers/interrupt_controller/loapic.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/kernel/mm.h>

/*
 * Definitions/macros for enabling paging
 */

/* Long mode, no-execute, syscall */
#define EFER_BITS (X86_EFER_MSR_LME | X86_EFER_MSR_NXE | X86_EFER_MSR_SCE)

/* Paging, write-protect */
#define CR0_BITS (CR0_PG | CR0_WP)

/* PAE, SSE */
#define CR4_BITS (CR4_PAE | CR4_OSFXSR)

.macro set_efer
	movl $X86_EFER_MSR, %ecx
	rdmsr
	orl $EFER_BITS, %eax
	wrmsr
.endm
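/* The macro above is the usual read-modify-write of an MSR.  A rough C
 * sketch of the same operation (rdmsr()/wrmsr() are illustrative helpers
 * here, not Zephyr APIs):
 *
 *	uint64_t efer = rdmsr(X86_EFER_MSR);
 *
 *	efer |= X86_EFER_MSR_LME | X86_EFER_MSR_NXE | X86_EFER_MSR_SCE;
 *	wrmsr(X86_EFER_MSR, efer);
 *
 * Only EAX is OR'd in the assembly: EDX still holds the high half
 * returned by rdmsr, so the upper 32 bits are written back unchanged.
 */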

.macro install_pagetables_32
	movl %cr4, %eax
	orl $CR4_BITS, %eax
	movl %eax, %cr4
	clts

	/* Page tables created at build time by gen_mmu.py
	 * NOTE: Presumes phys=virt
	 */
	movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
	movl %eax, %cr3

	set_efer

	movl %cr0, %eax
	orl $CR0_BITS, %eax
	movl %eax, %cr0
.endm

.macro install_pagetables_64
	/* Here, we are already in long mode with paging enabled and
	 * just need to switch to our own page tables, but let's be
	 * paranoid and ensure CR4, CR0, and EFER_MSR are set up
	 * exactly how we expect. Logic is the same as install_pagetables_32
	 */
	movq %cr4, %rax
	orq $CR4_BITS, %rax
	movq %rax, %cr4
	clts

	/* NOTE: Presumes phys=virt */
	movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
	movq %rax, %cr3

	set_efer

	movq %cr0, %rax
	/* Use 32-bit instructions due to assembler fussiness with large
	 * immediate values with `orq`, CR0_PG is bit 31. We don't ever set any
	 * high bits in cr0 anyway.
	 */
	orl $CR0_BITS, %eax
	movq %rax, %cr0
.endm

.macro DEFINE_TSS_STACK_ARRAY
	.irp idx, DEFINE_STACK_ARRAY_IDX
		.word __X86_TSS64_SIZEOF-1
		.word tss\idx
		.word 0x8900
		.word 0, 0, 0, 0, 0
	.endr
.endm
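/* Each 16-byte record emitted above is a 64-bit TSS descriptor.  A rough
 * field-by-field decode (illustrative only; it relies on the TSS living
 * at a low address so the upper base words can stay zero):
 *
 *	.word __X86_TSS64_SIZEOF-1	limit[15:0]
 *	.word tss\idx			base[15:0]
 *	.word 0x8900			base[23:16] = 0, access = 0x89
 *					(present, DPL 0, 64-bit TSS)
 *	.word 0, 0, 0, 0, 0		limit[19:16]/flags, base[31:24],
 *					base[63:32], reserved
 */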
91
92/* The .locore section begins the page-aligned initialization region
93 * of low memory.  The first address is used as the architectural
94 * entry point for auxiliary CPUs being brought up (in real mode!)
95 * via a startup IPI.  It's is ALSO used by some loaders (well,
96 * ACRN...) who hard-coded the address by inspecting _start on a
97 * non-SMP build.
98 *
99 *               === OUTRAGEOUS HACK FOLLOWS ===
100 *
101 * Therefore it needs to start at OS entry with a 32 bit jump to the
102 * 32 bit entry point, and gets clobbered later (see the beginning of
103 * __start32) with NOP bytes such that the next CPU will fall through
104 * to the 16 bit SMP entry.
105 *
106 * We write out the JMP followed by 8 NOPs for simplicity.  No i386
107 * JMP encodes with more than 8 bytes, so we can come back later and
108 * scribble over it with 8 0x90 bytes (which is the 1-byte NOP) and be
109 * sure to get all of it without overwriting anything.
110 */
.section .locore,"ax"
.code32
.globl __start
__start:
	jmp __start32
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

.code16
.global x86_ap_start
x86_ap_start:
	/*
	 * First, we move to 32-bit protected mode, and set up the
	 * same flat environment that the BSP gets from the loader.
	 */
	lgdt gdt48
	lidt idt48
	movl %cr0, %eax
	or $1, %eax
	movl %eax, %cr0

	jmpl $X86_KERNEL_CS_32, $1f
.code32
1:	movw $X86_KERNEL_DS_32, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	movw %ax, %fs

	/*
	 * Now, reverse-map our local APIC ID to our logical CPU ID
	 * so we can locate our x86_cpuboot[] bundle. Put it in EBP.
	 */

	movl LOAPIC_BASE_ADDRESS+LOAPIC_ID, %eax
	shrl $24, %eax
	andl $0xFF, %eax		/* local APIC ID -> EAX */

	movl $x86_cpuboot, %ebp
	xorl %ebx, %ebx
1:	cmpl $CONFIG_MP_MAX_NUM_CPUS, %ebx
	jz unknown_loapic_id
	cmpb %al, x86_cpu_loapics(%ebx)
	je go64				/* proceed to 64-bit mode */
	incl %ebx
	addl $__X86_CPUBOOT_SIZEOF, %ebp
	jmp 1b

unknown_loapic_id:
	jmp unknown_loapic_id
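/* What the lookup above computes, as a C sketch (x86_cpu_loapics[] and
 * x86_cpuboot[] are the objects referenced above; everything else here
 * is illustrative):
 *
 *	uint8_t apic_id = (*(volatile uint32_t *)
 *			   (LOAPIC_BASE_ADDRESS + LOAPIC_ID)) >> 24;
 *
 *	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
 *		if (x86_cpu_loapics[i] == apic_id) {
 *			// EBP <- &x86_cpuboot[i], then jump to go64
 *		}
 *	}
 *	// no match: spin forever at unknown_loapic_id
 */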

.code32
.globl __start32
__start32:
	/*
	 * kernel execution begins here in 32-bit mode, with flat-mode
	 * descriptors in all segment registers, interrupts disabled.
	 */

	/* See note above, re: OUTRAGEOUS HACK */
	movl $__start, %ebp
	movb $0x90, 0(%ebp)
	movb $0x90, 1(%ebp)
	movb $0x90, 2(%ebp)
	movb $0x90, 3(%ebp)
	movb $0x90, 4(%ebp)
	movb $0x90, 5(%ebp)
	movb $0x90, 6(%ebp)
	movb $0x90, 7(%ebp)
	wbinvd

	lgdt gdt48
	lidt idt48

#include "../common.S"

	/*
	 * N.B.: if multiboot info struct is present, "common.S"
	 * has left a pointer to it in EBX.
	 */

	movl $x86_cpu_boot_arg, %ebp

	/* Inserting boot type */
	movl $MULTIBOOT_BOOT_TYPE, __x86_boot_arg_t_boot_type_OFFSET(%ebp)
	/* and multiboot info */
	movl %ebx, __x86_boot_arg_t_arg_OFFSET(%ebp)

	movl $x86_cpuboot, %ebp         /* BSP is always logical CPU id 0 */

go64:	/* Install page tables and transition to long mode */
	install_pagetables_32
	jmpl $X86_KERNEL_CS, $enter_code64

	/* Long mode entry point.  Arrive here from the code
	 * immediately above (shared between main CPU startup and AP
	 * startup), or from EFI entry in __start64.
	 *
	 * Here we reload the segment registers,
	 * and configure per-CPU stuff: GS, task register, stack.
	 */
	.code64
enter_code64:
	movl $X86_KERNEL_DS, %eax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	movw %ax, %fs

	/* On Intel processors, if GS is not zero and is being set to
	 * zero, GS_BASE is also being set to zero. This would interfere
	 * with the actual use of GS_BASE for userspace. To avoid accidentally
	 * clearing GS_BASE, simply set GS to 0 at boot, so any subsequent
	 * clearing of GS will not clear GS_BASE.
	 */
	mov $0, %eax
	movw %ax, %gs

	movw __x86_cpuboot_t_tr_OFFSET(%rbp), %ax
	ltr %ax

	/* Set up MSRs for GS / KERNEL_GS base */
	movq __x86_cpuboot_t_gs_base_OFFSET(%rbp), %rax
	movq %rax, %rdx
	shrq $32, %rdx
	/* X86_KERNEL_GS_BASE and X86_GS_BASE are swapped by the 'swapgs'
	 * instruction.
	 */
	movl $X86_KERNEL_GS_BASE, %ecx
	wrmsr
	/* X86_GS_BASE shadows base fields of %gs, effectively setting %gs */
	movl $X86_GS_BASE, %ecx
	wrmsr
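	/* wrmsr takes the 64-bit value split across EDX:EAX with the MSR
	 * number in ECX, which is what the shift above prepares.  In C
	 * terms (wrmsr() is an illustrative helper, not a Zephyr API):
	 *
	 *	uint64_t base = cpuboot->gs_base;  // low half -> EAX,
	 *					   // high half -> EDX
	 *	wrmsr(X86_KERNEL_GS_BASE, base);   // swapped in by 'swapgs'
	 *	wrmsr(X86_GS_BASE, base);	   // the live %gs base
	 */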

	movq __x86_cpuboot_t_sp_OFFSET(%rbp), %rsp
	movq %rsp, %gs:__x86_tss64_t_ist1_OFFSET

	/* finally, complete environment for the C runtime and go. */
	cld	/* GCC presumes a clear direction flag */

#ifdef CONFIG_INIT_STACKS
	movq $0xAAAAAAAAAAAAAAAA, %rax
	movq %rsp, %rdi
	subq __x86_cpuboot_t_stack_size_OFFSET(%rbp), %rdi
	movq __x86_cpuboot_t_stack_size_OFFSET(%rbp), %rcx
	shr $3, %rcx /* moving 8 bytes at a time, so fewer repeats */
	rep stosq
#endif
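	/* The 'rep stosq' above amounts to a 64-bit-at-a-time memset of
	 * the whole boot stack with the 0xAA poison pattern, roughly
	 * (stack_top/stack_size stand in for RSP and the cpuboot
	 * stack_size field):
	 *
	 *	uint64_t *p = (uint64_t *)(stack_top - stack_size);
	 *
	 *	for (size_t i = 0; i < stack_size / 8; i++) {
	 *		p[i] = 0xAAAAAAAAAAAAAAAAULL;
	 *	}
	 */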

#ifdef CONFIG_STACK_CANARIES_TLS
	movq %rsp, %rdi
	pushq %rsp
	call z_x86_early_tls_update_gdt
	popq %rsp
#endif

	/* Enter C domain now that we have a stack set up, never to return */
	movq %rbp, %rdi

	call z_x86_cpu_init

	/* 64 bit OS entry point, used by EFI support.  UEFI
	 * guarantees an identity-mapped page table that covers
	 * physical memory, and the loader stub already used it to
	 * write all of the Zephyr image, so we know it works for what
	 * we need.  Other things need fixups to match what multiboot
	 * 32 bit startup does.
	 */
.globl __start64
__start64:
	/* Zero the TSC */
	xorq %rax, %rax
	xorq %rdx, %rdx
	movq $X86_TIME_STAMP_COUNTER_MSR, %rcx
	wrmsr

	lidt idt80
	lgdt gdt80

	install_pagetables_64

	/* Mask (disable) the legacy 8259 PICs.  Almost certainly not
	 * needed on modern UEFI platforms taking this code path, but...
	 */
	movb $0xff, %al
	outb %al, $0x21
	outb %al, $0xA1

	movq $x86_cpu_boot_arg, %rbp
	/* Inserting boot type */
	movq $EFI_BOOT_TYPE, __x86_boot_arg_t_boot_type_OFFSET(%rbp)
	/* and EFI boot arg (if any) */
	movq %rbx, __x86_boot_arg_t_arg_OFFSET(%rbp)

	movq $x86_cpuboot, %rbp		/* BSP is always logical CPU id 0 */

	mov jmpdesc, %rax
	jmp *%rax
jmpdesc:
	.quad enter_code64
	.short X86_KERNEL_CS

/*
 * void x86_sse_init(struct k_thread *thread);
 *
 * Initialize floating-point state to something sane. If 'thread' is
 * not NULL, then the resulting FP state is saved to thread->arch.sse.
 */

.global x86_sse_init
x86_sse_init:
	fninit
	ldmxcsr mxcsr
	testq %rdi, %rdi
	jz 1f
	fxsave _thread_offset_to_sse(%rdi)
1:	retq

mxcsr:	.long X86_MXCSR_SANE

/*
 * void z_x86_switch(void *switch_to, void **switched_from);
 *
 * Note that switch_handle for us is simply a pointer to the containing
 * 'struct k_thread', thus:
 *
 * RDI = (struct k_thread *) switch_to
 * RSI = (struct k_thread **) address of output thread switch_handle field
 */
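/* The 'subq' below is the usual container-of adjustment: the caller hands
 * us &thread->switch_handle, and backing up by that field's offset
 * recovers the thread itself.  Roughly, in C (using the familiar
 * CONTAINER_OF idiom for illustration):
 *
 *	struct k_thread *thread = CONTAINER_OF(switched_from,
 *					       struct k_thread,
 *					       switch_handle);
 */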

.globl z_x86_switch
z_x86_switch:
	/* RSI contains the switch_handle field to which we are
	 * notionally supposed to store.  Offset it to get back to the
	 * thread handle instead.
	 */
	subq $___thread_t_switch_handle_OFFSET, %rsi

	andb $~X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)

	popq %rax
	movq %rax, _thread_offset_to_rip(%rsi)
	pushfq
	popq %rax
	movq %rax, _thread_offset_to_rflags(%rsi)
	movq %rsp, _thread_offset_to_rsp(%rsi)
	movq %rbx, _thread_offset_to_rbx(%rsi)
	movq %rbp, _thread_offset_to_rbp(%rsi)
	movq %r12, _thread_offset_to_r12(%rsi)
	movq %r13, _thread_offset_to_r13(%rsi)
	movq %r14, _thread_offset_to_r14(%rsi)
	movq %r15, _thread_offset_to_r15(%rsi)
#ifdef CONFIG_USERSPACE
	/* We're always in supervisor mode if we get here; the other case
	 * is when __resume is invoked from irq_dispatch
	 */
	movq $X86_KERNEL_CS, _thread_offset_to_cs(%rsi)
	movq $X86_KERNEL_DS, _thread_offset_to_ss(%rsi)
#endif
	/* Store the handle (i.e. our thread struct address) into the
	 * switch handle field; this is a synchronization signal that
	 * must occur after the last data from the old context is
	 * saved.
	 */
	movq %rsi, ___thread_t_switch_handle_OFFSET(%rsi)

	movq %gs:__x86_tss64_t_ist1_OFFSET, %rsp

	/* fall through to __resume */

/*
 * Entry:
 *   RSP = top of CPU interrupt stack
 *   RDI = (struct k_thread *) thread to resume
 */

__resume:
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/*
	 * Write the TLS base pointer to the FS_BASE MSR, since GCC
	 * emits code that accesses TLS data via offsets from FS.
	 * wrmsr writes EDX:EAX to the MSR selected by ECX, so the
	 * value is loaded into both RAX and RDX and RDX is shifted
	 * right by 32 bits so that EDX holds the upper half.
	 */
	movl $X86_FS_BASE, %ecx
	movq _thread_offset_to_tls(%rdi), %rax
	movq _thread_offset_to_tls(%rdi), %rdx
	shrq $32, %rdx
	wrmsr
#endif
#if (!defined(CONFIG_X86_KPTI) && defined(CONFIG_USERSPACE)) \
		|| defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	pushq %rdi	/* Caller-saved, stash it */
#if !defined(CONFIG_X86_KPTI) && defined(CONFIG_USERSPACE)
	/* If KPTI is enabled we're always on the kernel's page tables in
	 * this context and the appropriate page table switch takes place
	 * when trampolining back to user mode
	 */
	call z_x86_swap_update_page_tables
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	call z_thread_mark_switched_in
#endif
	popq %rdi
#endif /* (!CONFIG_X86_KPTI && CONFIG_USERSPACE) || \
	   CONFIG_INSTRUMENT_THREAD_SWITCHING */

#ifdef CONFIG_USERSPACE
	/* Set up exception return stack frame */
	pushq _thread_offset_to_ss(%rdi)	/* SS */
#else
	pushq $X86_KERNEL_DS			/* SS */
#endif /* CONFIG_USERSPACE */
	pushq _thread_offset_to_rsp(%rdi)	/* RSP */
	pushq _thread_offset_to_rflags(%rdi)	/* RFLAGS */
#ifdef CONFIG_USERSPACE
	pushq _thread_offset_to_cs(%rdi)	/* CS */
#else
	pushq $X86_KERNEL_CS			/* CS */
#endif
	pushq _thread_offset_to_rip(%rdi)	/* RIP */

#ifdef CONFIG_ASSERT
	/* Poison the old thread's saved RIP pointer with a
	 * recognizable value near NULL, to easily catch reuse of the
	 * thread object across CPUs in SMP.  Strictly speaking this
	 * is not an assertion, but it's very cheap and worth having
	 * on during routine testing.
	 */
	movq $0xB9, _thread_offset_to_rip(%rdi)
#endif


	movq _thread_offset_to_rbx(%rdi), %rbx
	movq _thread_offset_to_rbp(%rdi), %rbp
	movq _thread_offset_to_r12(%rdi), %r12
	movq _thread_offset_to_r13(%rdi), %r13
	movq _thread_offset_to_r14(%rdi), %r14
	movq _thread_offset_to_r15(%rdi), %r15
#ifdef CONFIG_USERSPACE
	/* Set correct privilege elevation stack to manually switch to in
	 * z_x86_syscall_entry_stub()
	 */
	movq _thread_offset_to_psp(%rdi), %rax
	movq %rax, %gs:__x86_tss64_t_psp_OFFSET
#endif

	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rdi)
	jz 1f

	fxrstor _thread_offset_to_sse(%rdi)
	movq _thread_offset_to_rax(%rdi), %rax
	movq _thread_offset_to_rcx(%rdi), %rcx
	movq _thread_offset_to_rdx(%rdi), %rdx
	movq _thread_offset_to_rsi(%rdi), %rsi
	movq _thread_offset_to_r8(%rdi), %r8
	movq _thread_offset_to_r9(%rdi), %r9
	movq _thread_offset_to_r10(%rdi), %r10
	movq _thread_offset_to_r11(%rdi), %r11
	movq _thread_offset_to_rdi(%rdi), %rdi  /* do last :-) */

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we are returning to user mode */
	testb $0x3, 8(%rsp)
	jz 1f
#ifdef CONFIG_X86_KPTI
	jmp z_x86_trampoline_to_user
#else
	swapgs
#endif /* CONFIG_X86_KPTI */
#endif /* CONFIG_USERSPACE */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1. Disable speculation past this point */
	lfence
#endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
	iretq


#ifdef CONFIG_X86_KPTI
#define EXCEPT_CODE(nr, ist)						\
	vector_ ## nr: pushq %gs:__x86_tss64_t_ist ## ist ## _OFFSET;	\
	pushq $nr;							\
	jmp except
#define EXCEPT(nr, ist)							\
	vector_ ## nr: pushq $0;					\
	pushq %gs:__x86_tss64_t_ist ## ist ## _OFFSET;			\
	pushq $nr; \
	jmp except
#else
#define EXCEPT_CODE(nr) vector_ ## nr: pushq $nr; jmp except
#define EXCEPT(nr)	vector_ ## nr: pushq $0; pushq $nr; jmp except
#endif

/*
 * When we arrive at 'except' from one of the EXCEPT(X) stubs,
 * we're on the exception stack with irqs unlocked (or the trampoline stack
 * with irqs locked if KPTI is enabled) and it contains:
 *
 *   SS
 *   RSP
 *   RFLAGS
 *   CS
 *   RIP
 *   Error Code if pushed by CPU, else 0
 *   IST index in TSS
 *   Vector number <- RSP points here
 *
 */
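/* For reference, the five slots from RIP upward are the architectural
 * 64-bit interrupt frame pushed by the CPU itself; viewed from the lowest
 * address it looks roughly like this (illustrative layout, not a Zephyr
 * type):
 *
 *	struct hw_irq_frame {
 *		uint64_t rip;
 *		uint64_t cs;
 *		uint64_t rflags;
 *		uint64_t rsp;
 *		uint64_t ss;
 *	};
 *
 * Everything below RIP (error code, IST value, vector number, and the
 * register/FXSAVE saves made later) comes from the stubs and the code
 * below.
 */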

except: /*
	 * finish struct NANO_ESF on stack. 'vector' .. 'ss' are
	 * already there from hardware trap and EXCEPT_*() stub.
	 */

	pushq %r11

#ifdef CONFIG_USERSPACE
	/* Swap GS register values and page tables if we came from user mode */
	testb $0x3, 40(%rsp)
	jz 1f
	swapgs
#ifdef CONFIG_X86_KPTI
	/* Load kernel's page table. NOTE: Presumes phys=virt */
	movq $z_x86_kernel_ptables, %r11
	movq %r11, %cr3
#endif /* CONFIG_X86_KPTI */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1. Disable speculation past this point */
	lfence
#endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
#ifdef CONFIG_X86_KPTI
	/* Save old trampoline stack pointer in R11 */
	movq %rsp, %r11

	/* Switch to the correct stack */
	movq 16(%r11), %rsp

	/* Transplant trampoline stack contents */
	pushq 64(%r11)	/* SS */
	pushq 56(%r11)	/* RSP */
	pushq 48(%r11)	/* RFLAGS */
	pushq 40(%r11)	/* CS */
	pushq 32(%r11)	/* RIP */
	pushq 24(%r11)	/* Error code */
	pushq 8(%r11)	/* Vector */
	pushq (%r11)	/* Stashed R11 */
	movq $0, (%r11) /* Cover our tracks */

	/* We're done, it's safe to re-enable interrupts. */
	sti
#endif /* CONFIG_X86_KPTI */
#endif /* CONFIG_USERSPACE */

	/* In addition to r11, push the rest of the caller-saved regs */
	/* Positioning of this fxsave is important, RSP must be 16-byte
	 * aligned
	 */
	subq $X86_FXSAVE_SIZE, %rsp
	fxsave (%rsp)
	pushq %r10
	pushq %r9
	pushq %r8
	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rax
	pushq %rbp
#ifdef CONFIG_EXCEPTION_DEBUG
	/* Callee saved regs */
	pushq %r15
	pushq %r14
	pushq %r13
	pushq %r12
	pushq %rbx
#endif /* CONFIG_EXCEPTION_DEBUG */
	movq %rsp, %rdi

	call z_x86_exception

	/* If we returned, the exception was handled successfully and the
	 * thread may resume (the pushed RIP may have been modified)
	 */
#ifdef CONFIG_EXCEPTION_DEBUG
	popq %rbx
	popq %r12
	popq %r13
	popq %r14
	popq %r15
#endif /* CONFIG_EXCEPTION_DEBUG */
	popq %rbp
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	popq %r8
	popq %r9
	popq %r10
	fxrstor (%rsp)
	addq $X86_FXSAVE_SIZE, %rsp
	popq %r11

	/* Drop the vector/err code pushed by the HW or EXCEPT_*() stub */
	add $16, %rsp

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we are returning to user mode */
	testb $0x3, 8(%rsp)
	jz 1f
	cli
#ifdef CONFIG_X86_KPTI
	jmp z_x86_trampoline_to_user
#else
	swapgs
#endif /* CONFIG_X86_KPTI */
1:
#endif /* CONFIG_USERSPACE */

	iretq

#ifdef CONFIG_X86_KPTI
EXCEPT      ( 0, 7); EXCEPT      ( 1, 7); EXCEPT       (2, 6); EXCEPT      ( 3, 7)
EXCEPT      ( 4, 7); EXCEPT      ( 5, 7); EXCEPT       (6, 7); EXCEPT      ( 7, 7)
EXCEPT_CODE ( 8, 7); EXCEPT      ( 9, 7); EXCEPT_CODE (10, 7); EXCEPT_CODE (11, 7)
EXCEPT_CODE (12, 7); EXCEPT_CODE (13, 7); EXCEPT_CODE (14, 7); EXCEPT      (15, 7)
EXCEPT      (16, 7); EXCEPT_CODE (17, 7); EXCEPT      (18, 7); EXCEPT      (19, 7)
EXCEPT      (20, 7); EXCEPT      (21, 7); EXCEPT      (22, 7); EXCEPT      (23, 7)
EXCEPT      (24, 7); EXCEPT      (25, 7); EXCEPT      (26, 7); EXCEPT      (27, 7)
EXCEPT      (28, 7); EXCEPT      (29, 7); EXCEPT      (30, 7); EXCEPT      (31, 7)

/* Vector reserved for handling a kernel oops; treat as an exception
 * and not an interrupt
 */
EXCEPT(Z_X86_OOPS_VECTOR, 7);
#else
EXCEPT      ( 0); EXCEPT      ( 1); EXCEPT      ( 2); EXCEPT      ( 3)
EXCEPT      ( 4); EXCEPT      ( 5); EXCEPT      ( 6); EXCEPT      ( 7)
EXCEPT_CODE ( 8); EXCEPT      ( 9); EXCEPT_CODE (10); EXCEPT_CODE (11)
EXCEPT_CODE (12); EXCEPT_CODE (13); EXCEPT_CODE (14); EXCEPT      (15)
EXCEPT      (16); EXCEPT_CODE (17); EXCEPT      (18); EXCEPT      (19)
EXCEPT      (20); EXCEPT      (21); EXCEPT      (22); EXCEPT      (23)
EXCEPT      (24); EXCEPT      (25); EXCEPT      (26); EXCEPT      (27)
EXCEPT      (28); EXCEPT      (29); EXCEPT      (30); EXCEPT      (31)

/* Vector reserved for handling a kernel oops; treat as an exception
 * and not an interrupt
 */
EXCEPT(Z_X86_OOPS_VECTOR);
#endif /* CONFIG_X86_KPTI */

/*
 * When we arrive at 'irq' from one of the IRQ(X) stubs,
 * we're on the "freshest" IRQ stack (or the trampoline stack if we came from
 * user mode and KPTI is enabled) and it contains:
 *
 *   SS
 *   RSP
 *   RFLAGS
 *   CS
 *   RIP
 *   (vector number - IV_IRQS) <-- RSP points here
 */
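/* The value at the bottom of that frame is already rebased by IV_IRQS, so
 * irq_dispatch can use it to index the tables declared below directly; in
 * C terms, roughly:
 *
 *	unsigned int line = vector - IV_IRQS;
 *
 *	x86_irq_funcs[line](x86_irq_args[line]);
 */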

.globl x86_irq_funcs	/* see irq_manage.c .. */
.globl x86_irq_args	/* .. for these definitions */

irq:
	pushq %rsi

#ifdef CONFIG_USERSPACE
	/* Swap GS register values if we came in from user mode */
	testb $0x3, 24(%rsp)
	jz 1f
	swapgs
#ifdef CONFIG_X86_KPTI
	/* Load kernel's page table. NOTE: presumes phys=virt */
	movq $z_x86_kernel_ptables, %rsi
	movq %rsi, %cr3
#endif /* CONFIG_X86_KPTI */
1:
#ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
	/* swapgs variant of Spectre V1. Disable speculation past this point */
	lfence
#endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
#ifdef CONFIG_X86_KPTI
	/* Save old trampoline stack pointer in RSI */
	movq %rsp, %rsi

	/* Switch to the interrupt stack */
	movq %gs:__x86_tss64_t_ist1_OFFSET, %rsp

	/* Transplant trampoline stack contents */
	pushq 48(%rsi)	/* SS */
	pushq 40(%rsi)	/* RSP */
	pushq 32(%rsi)	/* RFLAGS */
	pushq 24(%rsi)	/* CS */
	pushq 16(%rsi)	/* RIP */
	pushq 8(%rsi)	/* Vector */
	pushq (%rsi)	/* Stashed RSI value */
	movq $0, (%rsi) /* Cover our tracks, stashed RSI might be sensitive */
#endif /* CONFIG_X86_KPTI */
#endif /* CONFIG_USERSPACE */

	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi

	/*
	 * Bump the IRQ nesting count and move to the next IRQ stack.
	 * That's sufficient to safely re-enable interrupts, so if we
	 * haven't reached the maximum nesting depth yet, do it.
	 */

	incl ___cpu_t_nested_OFFSET(%rsi)
	subq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
	cmpl $CONFIG_ISR_DEPTH, ___cpu_t_nested_OFFSET(%rsi)
	jz 1f
	sti
1:	cmpl $1, ___cpu_t_nested_OFFSET(%rsi)
	je irq_enter_unnested

	/*
	 * if we're a nested interrupt, we have to dump the state to the
	 * stack. we play some games here to re-arrange the stack thusly:
	 *
	 * SS RSP RFLAGS CS RIP RAX RSI
	 * RCX RDX RDI R8 R9 R10 R11
	 * X86_FXSAVE_SIZE bytes of SSE data <-- RSP points here
	 *
	 * note that the final value of RSP must be 16-byte aligned here,
	 * both to satisfy FXSAVE/FXRSTOR but also to honor the C ABI.
	 */

irq_enter_nested: /* Nested IRQ: dump register state to stack. */
	pushq %rcx
	movq 16(%rsp), %rcx /* RCX = vector */
	movq %rax, 16(%rsp) /* looks like we pushed RAX, not the vector */
	pushq %rdx
	pushq %rdi
	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11
	subq $X86_FXSAVE_SIZE, %rsp
	fxsave (%rsp)
	jmp irq_dispatch

irq_enter_unnested: /* Not nested: dump state to thread struct for __resume */
	movq ___cpu_t_current_OFFSET(%rsi), %rsi
	orb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
	fxsave _thread_offset_to_sse(%rsi)
	movq %rbx, _thread_offset_to_rbx(%rsi)
	movq %rbp, _thread_offset_to_rbp(%rsi)
	movq %r12, _thread_offset_to_r12(%rsi)
	movq %r13, _thread_offset_to_r13(%rsi)
	movq %r14, _thread_offset_to_r14(%rsi)
	movq %r15, _thread_offset_to_r15(%rsi)
	movq %rax, _thread_offset_to_rax(%rsi)
	movq %rcx, _thread_offset_to_rcx(%rsi)
	movq %rdx, _thread_offset_to_rdx(%rsi)
	movq %rdi, _thread_offset_to_rdi(%rsi)
	movq %r8, _thread_offset_to_r8(%rsi)
	movq %r9, _thread_offset_to_r9(%rsi)
	movq %r10, _thread_offset_to_r10(%rsi)
	movq %r11, _thread_offset_to_r11(%rsi)
	popq %rax /* RSI */
	movq %rax, _thread_offset_to_rsi(%rsi)
	popq %rcx /* vector number */
	popq %rax /* RIP */
	movq %rax, _thread_offset_to_rip(%rsi)
	popq %rax /* CS */
#ifdef CONFIG_USERSPACE
	movq %rax, _thread_offset_to_cs(%rsi)
#endif
	popq %rax /* RFLAGS */
	movq %rax, _thread_offset_to_rflags(%rsi)
	popq %rax /* RSP */
	movq %rax, _thread_offset_to_rsp(%rsi)
	popq %rax /* SS */
#ifdef CONFIG_USERSPACE
	movq %rax, _thread_offset_to_ss(%rsi)
#endif

irq_dispatch:
#ifdef CONFIG_SCHED_THREAD_USAGE
	pushq %rcx
	call z_sched_usage_stop
	popq %rcx
#endif
	movq x86_irq_funcs(,%rcx,8), %rax
	movq x86_irq_args(,%rcx,8), %rdi
	call *%rax

	xorq %rax, %rax
#ifdef CONFIG_X2APIC
	xorl %edx, %edx
	movl $(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx
	wrmsr
#else /* xAPIC */
	movq Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %rdx
	movl %eax, LOAPIC_EOI(%rdx)
#endif /* CONFIG_X2APIC */
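	/* Both branches above write a zero to the local APIC EOI register;
	 * only the access mechanism differs.  A rough C sketch (wrmsr()
	 * and loapic_base are illustrative, not Zephyr APIs):
	 *
	 *	#ifdef CONFIG_X2APIC
	 *	// x2APIC maps register 0xB0 to MSR 0x800 + (0xB0 >> 4)
	 *	wrmsr(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4), 0);
	 *	#else
	 *	// xAPIC: 32-bit MMIO write of 0 at the LOAPIC base + 0xB0
	 *	*(volatile uint32_t *)(loapic_base + LOAPIC_EOI) = 0;
	 *	#endif
	 */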

	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi

	cli
	addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
	decl ___cpu_t_nested_OFFSET(%rsi)
	jnz irq_exit_nested

	/* not nested; ask the scheduler who's up next and resume it */

	movq ___cpu_t_current_OFFSET(%rsi), %rdi
	call z_get_next_switch_handle
	movq %rax, %rdi
	jmp __resume

irq_exit_nested:
	fxrstor (%rsp)
	addq $X86_FXSAVE_SIZE, %rsp
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rdx
	popq %rcx
	popq %rsi
	popq %rax
	iretq

#define IRQ(nr) vector_ ## nr: pushq $(nr - IV_IRQS); jmp irq

IRQ( 33); IRQ( 34); IRQ( 35); IRQ( 36); IRQ( 37); IRQ( 38); IRQ( 39)
IRQ( 40); IRQ( 41); IRQ( 42); IRQ( 43); IRQ( 44); IRQ( 45); IRQ( 46); IRQ( 47)
IRQ( 48); IRQ( 49); IRQ( 50); IRQ( 51); IRQ( 52); IRQ( 53); IRQ( 54); IRQ( 55)
IRQ( 56); IRQ( 57); IRQ( 58); IRQ( 59); IRQ( 60); IRQ( 61); IRQ( 62); IRQ( 63)
IRQ( 64); IRQ( 65); IRQ( 66); IRQ( 67); IRQ( 68); IRQ( 69); IRQ( 70); IRQ( 71)
IRQ( 72); IRQ( 73); IRQ( 74); IRQ( 75); IRQ( 76); IRQ( 77); IRQ( 78); IRQ( 79)
IRQ( 80); IRQ( 81); IRQ( 82); IRQ( 83); IRQ( 84); IRQ( 85); IRQ( 86); IRQ( 87)
IRQ( 88); IRQ( 89); IRQ( 90); IRQ( 91); IRQ( 92); IRQ( 93); IRQ( 94); IRQ( 95)
IRQ( 96); IRQ( 97); IRQ( 98); IRQ( 99); IRQ(100); IRQ(101); IRQ(102); IRQ(103)
IRQ(104); IRQ(105); IRQ(106); IRQ(107); IRQ(108); IRQ(109); IRQ(110); IRQ(111)
IRQ(112); IRQ(113); IRQ(114); IRQ(115); IRQ(116); IRQ(117); IRQ(118); IRQ(119)
IRQ(120); IRQ(121); IRQ(122); IRQ(123); IRQ(124); IRQ(125); IRQ(126); IRQ(127)
IRQ(128); IRQ(129); IRQ(130); IRQ(131); IRQ(132); IRQ(133); IRQ(134); IRQ(135)
IRQ(136); IRQ(137); IRQ(138); IRQ(139); IRQ(140); IRQ(141); IRQ(142); IRQ(143)
IRQ(144); IRQ(145); IRQ(146); IRQ(147); IRQ(148); IRQ(149); IRQ(150); IRQ(151)
IRQ(152); IRQ(153); IRQ(154); IRQ(155); IRQ(156); IRQ(157); IRQ(158); IRQ(159)
IRQ(160); IRQ(161); IRQ(162); IRQ(163); IRQ(164); IRQ(165); IRQ(166); IRQ(167)
IRQ(168); IRQ(169); IRQ(170); IRQ(171); IRQ(172); IRQ(173); IRQ(174); IRQ(175)
IRQ(176); IRQ(177); IRQ(178); IRQ(179); IRQ(180); IRQ(181); IRQ(182); IRQ(183)
IRQ(184); IRQ(185); IRQ(186); IRQ(187); IRQ(188); IRQ(189); IRQ(190); IRQ(191)
IRQ(192); IRQ(193); IRQ(194); IRQ(195); IRQ(196); IRQ(197); IRQ(198); IRQ(199)
IRQ(200); IRQ(201); IRQ(202); IRQ(203); IRQ(204); IRQ(205); IRQ(206); IRQ(207)
IRQ(208); IRQ(209); IRQ(210); IRQ(211); IRQ(212); IRQ(213); IRQ(214); IRQ(215)
IRQ(216); IRQ(217); IRQ(218); IRQ(219); IRQ(220); IRQ(221); IRQ(222); IRQ(223)
IRQ(224); IRQ(225); IRQ(226); IRQ(227); IRQ(228); IRQ(229); IRQ(230); IRQ(231)
IRQ(232); IRQ(233); IRQ(234); IRQ(235); IRQ(236); IRQ(237); IRQ(238); IRQ(239)
IRQ(240); IRQ(241); IRQ(242); IRQ(243); IRQ(244); IRQ(245); IRQ(246); IRQ(247)
IRQ(248); IRQ(249); IRQ(250); IRQ(251); IRQ(252); IRQ(253); IRQ(254); IRQ(255)

.section .lorodata,"a"

/*
 * IDT.
 */

/* Descriptor type. Traps don't implicitly disable interrupts. User variants
 * can be invoked by software running in user mode (ring 3).
 *
 * For KPTI everything lands on the trampoline stack and we must get off of
 * it before re-enabling interrupts; use interrupt gates for everything.
 */
#define INTR		0x8e
#define USER_INTR	0xee
#ifdef CONFIG_X86_KPTI
#define TRAP		INTR
#define USER_TRAP	USER_INTR
#else
#define TRAP		0x8f
#define USER_TRAP	0xef
#endif

#define IDT(nr, type, ist) \
	.word vector_ ## nr, X86_KERNEL_CS; \
	.byte ist, type; \
	.word 0, 0, 0, 0, 0
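/* Each invocation above emits one 16-byte long mode IDT gate.  A rough
 * decode of the layout (illustrative, not a Zephyr type; the upper offset
 * words can stay zero only because the vector_* stubs sit at low
 * addresses in .locore):
 *
 *	struct idt_gate64 {
 *		uint16_t offset_15_0;	// .word vector_<nr>
 *		uint16_t selector;	// X86_KERNEL_CS
 *		uint8_t  ist;		// bits 0-2: IST slot, 0 = none
 *		uint8_t  type_attr;	// INTR/TRAP/USER_* byte above
 *		uint16_t offset_31_16;	// zero here
 *		uint32_t offset_63_32;	// zero here
 *		uint32_t reserved;
 *	};
 */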

/* Which IST entry in TSS to use for automatic stack switching, or 0 if
 * no automatic switch is to take place. The stack page must be present
 * in the current page tables; if KPTI is on, only the trampoline stack
 * and the current user stack can be accessed.
 */
#ifdef CONFIG_X86_KPTI
/* Everything lands on ist2, which is set to the trampoline stack.
 * Interrupt/exception entry updates page tables and manually switches to
 * the irq/exception stacks stored in ist1/ist7
 */
#define	IRQ_STACK	2
#define EXC_STACK	2
#define BAD_STACK	2
#define NMI_STACK	2
#else
#define	IRQ_STACK	1
#define NMI_STACK	6 /* NMI stack */
#define EXC_STACK	7
#define BAD_STACK	7 /* Horrible things: double faults, MCEs */
#endif

.align 16
idt:
	IDT(  0, TRAP, EXC_STACK); IDT(  1, TRAP, EXC_STACK)
	IDT(  2, TRAP, NMI_STACK); IDT(  3, TRAP, EXC_STACK)
	IDT(  4, TRAP, EXC_STACK); IDT(  5, TRAP, EXC_STACK)
	IDT(  6, TRAP, EXC_STACK); IDT(  7, TRAP, EXC_STACK)
	IDT(  8, TRAP, BAD_STACK); IDT(  9, TRAP, EXC_STACK)
	IDT( 10, TRAP, EXC_STACK); IDT( 11, TRAP, EXC_STACK)
	IDT( 12, TRAP, EXC_STACK); IDT( 13, TRAP, EXC_STACK)
	IDT( 14, TRAP, EXC_STACK); IDT( 15, TRAP, EXC_STACK)
	IDT( 16, TRAP, EXC_STACK); IDT( 17, TRAP, EXC_STACK)
	IDT( 18, TRAP, BAD_STACK); IDT( 19, TRAP, EXC_STACK)
	IDT( 20, TRAP, EXC_STACK); IDT( 21, TRAP, EXC_STACK)
	IDT( 22, TRAP, EXC_STACK); IDT( 23, TRAP, EXC_STACK)
	IDT( 24, TRAP, EXC_STACK); IDT( 25, TRAP, EXC_STACK)
	IDT( 26, TRAP, EXC_STACK); IDT( 27, TRAP, EXC_STACK)
	IDT( 28, TRAP, EXC_STACK); IDT( 29, TRAP, EXC_STACK)
	IDT( 30, TRAP, EXC_STACK); IDT( 31, TRAP, EXC_STACK)

	/* Oops vector can be invoked from Ring 3 and runs on exception stack */
	IDT(Z_X86_OOPS_VECTOR, USER_INTR, EXC_STACK); IDT( 33, INTR, IRQ_STACK)
	IDT( 34, INTR, IRQ_STACK); IDT( 35, INTR, IRQ_STACK)
	IDT( 36, INTR, IRQ_STACK); IDT( 37, INTR, IRQ_STACK)
	IDT( 38, INTR, IRQ_STACK); IDT( 39, INTR, IRQ_STACK)
	IDT( 40, INTR, IRQ_STACK); IDT( 41, INTR, IRQ_STACK)
	IDT( 42, INTR, IRQ_STACK); IDT( 43, INTR, IRQ_STACK)
	IDT( 44, INTR, IRQ_STACK); IDT( 45, INTR, IRQ_STACK)
	IDT( 46, INTR, IRQ_STACK); IDT( 47, INTR, IRQ_STACK)
	IDT( 48, INTR, IRQ_STACK); IDT( 49, INTR, IRQ_STACK)
	IDT( 50, INTR, IRQ_STACK); IDT( 51, INTR, IRQ_STACK)
	IDT( 52, INTR, IRQ_STACK); IDT( 53, INTR, IRQ_STACK)
	IDT( 54, INTR, IRQ_STACK); IDT( 55, INTR, IRQ_STACK)
	IDT( 56, INTR, IRQ_STACK); IDT( 57, INTR, IRQ_STACK)
	IDT( 58, INTR, IRQ_STACK); IDT( 59, INTR, IRQ_STACK)
	IDT( 60, INTR, IRQ_STACK); IDT( 61, INTR, IRQ_STACK)
	IDT( 62, INTR, IRQ_STACK); IDT( 63, INTR, IRQ_STACK)
	IDT( 64, INTR, IRQ_STACK); IDT( 65, INTR, IRQ_STACK)
	IDT( 66, INTR, IRQ_STACK); IDT( 67, INTR, IRQ_STACK)
	IDT( 68, INTR, IRQ_STACK); IDT( 69, INTR, IRQ_STACK)
	IDT( 70, INTR, IRQ_STACK); IDT( 71, INTR, IRQ_STACK)
	IDT( 72, INTR, IRQ_STACK); IDT( 73, INTR, IRQ_STACK)
	IDT( 74, INTR, IRQ_STACK); IDT( 75, INTR, IRQ_STACK)
	IDT( 76, INTR, IRQ_STACK); IDT( 77, INTR, IRQ_STACK)
	IDT( 78, INTR, IRQ_STACK); IDT( 79, INTR, IRQ_STACK)
	IDT( 80, INTR, IRQ_STACK); IDT( 81, INTR, IRQ_STACK)
	IDT( 82, INTR, IRQ_STACK); IDT( 83, INTR, IRQ_STACK)
	IDT( 84, INTR, IRQ_STACK); IDT( 85, INTR, IRQ_STACK)
	IDT( 86, INTR, IRQ_STACK); IDT( 87, INTR, IRQ_STACK)
	IDT( 88, INTR, IRQ_STACK); IDT( 89, INTR, IRQ_STACK)
	IDT( 90, INTR, IRQ_STACK); IDT( 91, INTR, IRQ_STACK)
	IDT( 92, INTR, IRQ_STACK); IDT( 93, INTR, IRQ_STACK)
	IDT( 94, INTR, IRQ_STACK); IDT( 95, INTR, IRQ_STACK)
	IDT( 96, INTR, IRQ_STACK); IDT( 97, INTR, IRQ_STACK)
	IDT( 98, INTR, IRQ_STACK); IDT( 99, INTR, IRQ_STACK)
	IDT(100, INTR, IRQ_STACK); IDT(101, INTR, IRQ_STACK)
	IDT(102, INTR, IRQ_STACK); IDT(103, INTR, IRQ_STACK)
	IDT(104, INTR, IRQ_STACK); IDT(105, INTR, IRQ_STACK)
	IDT(106, INTR, IRQ_STACK); IDT(107, INTR, IRQ_STACK)
	IDT(108, INTR, IRQ_STACK); IDT(109, INTR, IRQ_STACK)
	IDT(110, INTR, IRQ_STACK); IDT(111, INTR, IRQ_STACK)
	IDT(112, INTR, IRQ_STACK); IDT(113, INTR, IRQ_STACK)
	IDT(114, INTR, IRQ_STACK); IDT(115, INTR, IRQ_STACK)
	IDT(116, INTR, IRQ_STACK); IDT(117, INTR, IRQ_STACK)
	IDT(118, INTR, IRQ_STACK); IDT(119, INTR, IRQ_STACK)
	IDT(120, INTR, IRQ_STACK); IDT(121, INTR, IRQ_STACK)
	IDT(122, INTR, IRQ_STACK); IDT(123, INTR, IRQ_STACK)
	IDT(124, INTR, IRQ_STACK); IDT(125, INTR, IRQ_STACK)
	IDT(126, INTR, IRQ_STACK); IDT(127, INTR, IRQ_STACK)
	IDT(128, INTR, IRQ_STACK); IDT(129, INTR, IRQ_STACK)
	IDT(130, INTR, IRQ_STACK); IDT(131, INTR, IRQ_STACK)
	IDT(132, INTR, IRQ_STACK); IDT(133, INTR, IRQ_STACK)
	IDT(134, INTR, IRQ_STACK); IDT(135, INTR, IRQ_STACK)
	IDT(136, INTR, IRQ_STACK); IDT(137, INTR, IRQ_STACK)
	IDT(138, INTR, IRQ_STACK); IDT(139, INTR, IRQ_STACK)
	IDT(140, INTR, IRQ_STACK); IDT(141, INTR, IRQ_STACK)
	IDT(142, INTR, IRQ_STACK); IDT(143, INTR, IRQ_STACK)
	IDT(144, INTR, IRQ_STACK); IDT(145, INTR, IRQ_STACK)
	IDT(146, INTR, IRQ_STACK); IDT(147, INTR, IRQ_STACK)
	IDT(148, INTR, IRQ_STACK); IDT(149, INTR, IRQ_STACK)
	IDT(150, INTR, IRQ_STACK); IDT(151, INTR, IRQ_STACK)
	IDT(152, INTR, IRQ_STACK); IDT(153, INTR, IRQ_STACK)
	IDT(154, INTR, IRQ_STACK); IDT(155, INTR, IRQ_STACK)
	IDT(156, INTR, IRQ_STACK); IDT(157, INTR, IRQ_STACK)
	IDT(158, INTR, IRQ_STACK); IDT(159, INTR, IRQ_STACK)
	IDT(160, INTR, IRQ_STACK); IDT(161, INTR, IRQ_STACK)
	IDT(162, INTR, IRQ_STACK); IDT(163, INTR, IRQ_STACK)
	IDT(164, INTR, IRQ_STACK); IDT(165, INTR, IRQ_STACK)
	IDT(166, INTR, IRQ_STACK); IDT(167, INTR, IRQ_STACK)
	IDT(168, INTR, IRQ_STACK); IDT(169, INTR, IRQ_STACK)
	IDT(170, INTR, IRQ_STACK); IDT(171, INTR, IRQ_STACK)
	IDT(172, INTR, IRQ_STACK); IDT(173, INTR, IRQ_STACK)
	IDT(174, INTR, IRQ_STACK); IDT(175, INTR, IRQ_STACK)
	IDT(176, INTR, IRQ_STACK); IDT(177, INTR, IRQ_STACK)
	IDT(178, INTR, IRQ_STACK); IDT(179, INTR, IRQ_STACK)
	IDT(180, INTR, IRQ_STACK); IDT(181, INTR, IRQ_STACK)
	IDT(182, INTR, IRQ_STACK); IDT(183, INTR, IRQ_STACK)
	IDT(184, INTR, IRQ_STACK); IDT(185, INTR, IRQ_STACK)
	IDT(186, INTR, IRQ_STACK); IDT(187, INTR, IRQ_STACK)
	IDT(188, INTR, IRQ_STACK); IDT(189, INTR, IRQ_STACK)
	IDT(190, INTR, IRQ_STACK); IDT(191, INTR, IRQ_STACK)
	IDT(192, INTR, IRQ_STACK); IDT(193, INTR, IRQ_STACK)
	IDT(194, INTR, IRQ_STACK); IDT(195, INTR, IRQ_STACK)
	IDT(196, INTR, IRQ_STACK); IDT(197, INTR, IRQ_STACK)
	IDT(198, INTR, IRQ_STACK); IDT(199, INTR, IRQ_STACK)
	IDT(200, INTR, IRQ_STACK); IDT(201, INTR, IRQ_STACK)
	IDT(202, INTR, IRQ_STACK); IDT(203, INTR, IRQ_STACK)
	IDT(204, INTR, IRQ_STACK); IDT(205, INTR, IRQ_STACK)
	IDT(206, INTR, IRQ_STACK); IDT(207, INTR, IRQ_STACK)
	IDT(208, INTR, IRQ_STACK); IDT(209, INTR, IRQ_STACK)
	IDT(210, INTR, IRQ_STACK); IDT(211, INTR, IRQ_STACK)
	IDT(212, INTR, IRQ_STACK); IDT(213, INTR, IRQ_STACK)
	IDT(214, INTR, IRQ_STACK); IDT(215, INTR, IRQ_STACK)
	IDT(216, INTR, IRQ_STACK); IDT(217, INTR, IRQ_STACK)
	IDT(218, INTR, IRQ_STACK); IDT(219, INTR, IRQ_STACK)
	IDT(220, INTR, IRQ_STACK); IDT(221, INTR, IRQ_STACK)
	IDT(222, INTR, IRQ_STACK); IDT(223, INTR, IRQ_STACK)
	IDT(224, INTR, IRQ_STACK); IDT(225, INTR, IRQ_STACK)
	IDT(226, INTR, IRQ_STACK); IDT(227, INTR, IRQ_STACK)
	IDT(228, INTR, IRQ_STACK); IDT(229, INTR, IRQ_STACK)
	IDT(230, INTR, IRQ_STACK); IDT(231, INTR, IRQ_STACK)
	IDT(232, INTR, IRQ_STACK); IDT(233, INTR, IRQ_STACK)
	IDT(234, INTR, IRQ_STACK); IDT(235, INTR, IRQ_STACK)
	IDT(236, INTR, IRQ_STACK); IDT(237, INTR, IRQ_STACK)
	IDT(238, INTR, IRQ_STACK); IDT(239, INTR, IRQ_STACK)
	IDT(240, INTR, IRQ_STACK); IDT(241, INTR, IRQ_STACK)
	IDT(242, INTR, IRQ_STACK); IDT(243, INTR, IRQ_STACK)
	IDT(244, INTR, IRQ_STACK); IDT(245, INTR, IRQ_STACK)
	IDT(246, INTR, IRQ_STACK); IDT(247, INTR, IRQ_STACK)
	IDT(248, INTR, IRQ_STACK); IDT(249, INTR, IRQ_STACK)
	IDT(250, INTR, IRQ_STACK); IDT(251, INTR, IRQ_STACK)
	IDT(252, INTR, IRQ_STACK); IDT(253, INTR, IRQ_STACK)
	IDT(254, INTR, IRQ_STACK); IDT(255, INTR, IRQ_STACK)
idt_end:

idt48:  /* LIDT descriptor for 32 bit mode */
	.word (idt_end - idt - 1)
	.long idt

idt80:  /* LIDT descriptor for 64 bit mode */
	.word (idt_end - idt - 1)
	.quad idt

.section .gdt,"ad"

/*
 * GDT - a single GDT is shared by all threads (and, eventually, all CPUs).
 * This layout must agree with the selectors in
 * include/arch/x86/intel64/thread.h.
 *
 * The 64-bit kernel code and data segment descriptors must be in sequence as
 * required by 'syscall'
 *
 * The 32-bit user code, 64-bit user code, and 64-bit user data segment
 * descriptors must be in sequence as required by 'sysret'
 */
.align 8

gdt:
	.word 0,      0, 0,      0	/* 0x00: null descriptor */
	.word 0xFFFF, 0, 0x9A00, 0x00CF	/* 0x08: 32-bit kernel code */
	.word 0xFFFF, 0, 0x9200, 0x00CF	/* 0x10: 32-bit kernel data */
	.word 0,      0, 0x9800, 0x0020	/* 0x18: 64-bit kernel code */
	.word 0,      0, 0x9200, 0x0000	/* 0x20: 64-bit kernel data */
	.word 0xFFFF, 0, 0xFA00, 0x00CF /* 0x28: 32-bit user code (unused) */
	.word 0,      0, 0xF200, 0x0000	/* 0x30: 64-bit user data */
	.word 0,      0, 0xF800, 0x0020	/* 0x38: 64-bit user code */
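	/* A rough decode of the entries above, taking the kernel code
	 * descriptors as examples (illustrative only):
	 *
	 *   0x08: .word 0xFFFF, 0, 0x9A00, 0x00CF
	 *	   base = 0, access = 0x9A (present, DPL 0, code, readable),
	 *	   flags/limit = 0xCF (4 KiB granularity, 32-bit, 4 GiB flat)
	 *   0x18: .word 0, 0, 0x9800, 0x0020
	 *	   only the L (long mode) bit is set in the flags; base and
	 *	   limit are ignored for 64-bit code.  The user descriptors
	 *	   are the same shapes with DPL 3 (0xF2/0xF8 access bytes).
	 */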

	/* Remaining entries are TSS for each enabled CPU */

	DEFINE_TSS_STACK_ARRAY

gdt_end:

gdt48:  /* LGDT descriptor for 32 bit mode */
	.word (gdt_end - gdt - 1)
	.long gdt

gdt80:  /* LGDT descriptor for long mode */
	.word (gdt_end - gdt - 1)
	.quad gdt
