Lines Matching +full:ecx +full:- +full:2000

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
25 #include <asm/asm-offsets.h>
40 #include <asm/nospec-branch.h>
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
52 * This is the only entry point used for 64-bit system calls. The
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
71 * r11 saved rflags (note: r11 is a callee-clobbered register in the C ABI)
78 * (note: r12-r15, rbp, rbx are callee-preserved in the C ABI)
82 * When the user can change pt_regs->foo, always force IRET. That is because
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
107 pushq %rax /* pt_regs->orig_ax */
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
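Note: the six pushes above (file lines 101-109) hand-build the tail of struct pt_regs (the hardware iret-frame slots plus orig_ax); PUSH_AND_CLEAR_REGS then fills in the general-purpose registers. A minimal C sketch of just that slice, with field names as in arch/x86/include/asm/ptrace.h; the struct name is illustrative:

    /*
     * Tail of struct pt_regs filled by the pushq sequence above.
     * Pushes run ss -> orig_ax, so ss lands at the highest address and
     * orig_ax at the lowest of this group.
     */
    struct syscall_frame_tail {
        unsigned long orig_ax;  /* pushq %rax                      */
        unsigned long ip;       /* pushq %rcx    (user RIP)        */
        unsigned long cs;       /* pushq $__USER_CS                */
        unsigned long flags;    /* pushq %r11    (user RFLAGS)     */
        unsigned long sp;       /* pushq TSS_sp2 (stashed user RSP)*/
        unsigned long ss;       /* pushq $__USER_DS                */
    };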
124 * a completely clean 64-bit userspace context. If we're not,
139 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
150 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
151 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
153 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
154 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
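Note: the shl/sar pair above sign-extends RCX from the top implemented virtual-address bit; a canonical address survives the round trip unchanged. A hedged C equivalent (is_canonical and vaddr_bits are illustrative names; 48 corresponds to 4-level paging, 57 to X86_FEATURE_LA57):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Shift the address left so the highest implemented bit becomes the
     * sign bit, then arithmetic-shift back.  Canonical addresses come
     * back unchanged.  Relies on arithmetic right shift of signed
     * values, as the compilers the kernel supports provide.
     */
    static bool is_canonical(uint64_t addr, int vaddr_bits)
    {
        int shift = 64 - vaddr_bits;  /* 16 for 48-bit, 7 for 57-bit */

        return (uint64_t)(((int64_t)(addr << shift)) >> shift) == addr;
    }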
176 * the opportunistic SYSRET conditions. For example, single-stepping
210 pushq RSP-RDI(%rdi) /* RSP */
239 * Save callee-saved registers
267 /* restore callee-saved registers */
338 * idtentry_body - Macro to emit code calling the C function
362 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
374 * idtentry - Macro to generate entry stubs for simple IDT entries
391 pushq $-1 /* ORIG_RAX: no syscall to restart */
396 * If coming from kernel space, create a 6-word gap to allow the
399 testb $3, CS-ORIG_RAX(%rsp)
437 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
459 pushq $-1 /* ORIG_RAX: no syscall to restart */
465 testb $3, CS-ORIG_RAX(%rsp)
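Note: the recurring "testb $3, CS-...(%rsp)" lines (here and in the stubs below) test the low two bits of the saved CS selector, i.e. the privilege level the CPU was running at when the exception hit. A minimal C analogue (function name is illustrative):

    /* Non-zero CPL in the saved CS means the exception came from user mode. */
    static int came_from_user_mode(unsigned long saved_cs)
    {
        return (saved_cs & 3) != 0;
    }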
489 * idtentry_vc - Macro to generate entry stub for #VC
498 * an IST stack by switching to the task stack if coming from user-space (which
500 * entered from kernel-mode.
502 * If entered from kernel-mode, the return stack is validated first, and if it is
504 * will switch to a fall-back stack (VC2) and call a special handler function.
520 testb $3, CS-ORIG_RAX(%rsp)
525 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
534 * stack if it is safe to do so. If not it switches to the VC fall-back
546 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
554 * identical to the stack in the IRET frame or the VC fall-back stack,
586 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
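Note: a hedged sketch of the #VC stack-selection policy the comments above describe: user-mode entries move to the task stack, kernel-mode entries keep the interrupted stack only if it validates as safe, and anything else falls back to the VC2 stack with a special handler. The enum and function are illustrative, not kernel code:

    enum vc_stack { VC_TASK_STACK, VC_CURRENT_STACK, VC_FALLBACK_VC2 };

    static enum vc_stack vc_select_stack(int from_user, int kernel_stack_safe)
    {
        if (from_user)
            return VC_TASK_STACK;       /* switch to the task stack */
        if (kernel_stack_safe)
            return VC_CURRENT_STACK;    /* keep the validated kernel stack */
        return VC_FALLBACK_VC2;         /* fall-back stack, special handler */
    }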
671 addq $8, %rsp /* skip regs->orig_ax */
680 .long .Lnative_iret - (. + 4)
687 * 64-bit mode SS:RSP on the exception stack is always valid.
690 testb $4, (SS-RIP)(%rsp)
697 * This may fault. Non-paranoid faults on return to userspace are
699 * Double-faults due to espfix64 are handled in exc_double_fault.
717 * --- top of ESPFIX stack ---
722 * RIP <-- RSP points here when we're done
723 * RAX <-- espfix_waddr points here
724 * --- bottom of ESPFIX stack ---
749 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
772 * is read-only and RSP[31:16] are preloaded with the userspace
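Note: a C restatement of the aliasing rule quoted at file line 749: for any X, (espfix_stack | (X & 0xffff0000)) is a read-only alias of the frame written through espfix_waddr, so OR-ing in bits 31:16 of the current RSP yields the address to IRET from. The function name and kernel_rsp parameter are illustrative:

    #include <stdint.h>

    /* Read-only alias of the copied iret frame, per the comment at line 749. */
    static uint64_t espfix_ro_alias(uint64_t espfix_stack, uint64_t kernel_rsp)
    {
        return espfix_stack | (kernel_rsp & 0xffff0000u);
    }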
826 * existing activation in its critical region -- if so, we pop the current
862 movl %ds, %ecx
865 movl %es, %ecx
868 movl %fs, %ecx
871 movl %gs, %ecx
886 pushq $-1 /* orig_ax = -1 => not a system call */
898 * N 0 -> SWAPGS on exit
899 * 1 -> no SWAPGS on exit
903 * R14 - old CR3
904 * R15 - old SPEC_CTRL
951 /* EBX = 1 -> kernel GSBASE active, no restore required */
955 * The kernel-enforced convention is that a negative GSBASE indicates
958 movl $MSR_GS_BASE, %ecx
963 /* EBX = 0 -> SWAPGS required on exit */
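Note: on non-FSGSBASE CPUs this is the convention the rdmsr selected by "movl $MSR_GS_BASE, %ecx" checks: a negative GSBASE is a kernel value, so no SWAPGS is needed; otherwise SWAPGS now and remember to SWAPGS again on exit. A hedged C restatement returning the EBX flag the comments refer to (the function name is illustrative):

    #include <stdint.h>

    /* 1 -> kernel GSBASE already active (no SWAPGS on exit), 0 -> SWAPGS needed. */
    static int paranoid_gsbase_flag(int64_t gs_base_msr)
    {
        return gs_base_msr < 0;
    }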
982 * only on return from non-NMI IST interrupts that came
994 * N 0 -> SWAPGS on exit
995 * 1 -> no SWAPGS on exit
999 * R14 - old CR3
1000 * R15 - old SPEC_CTRL
1007 * to the per-CPU x86_spec_ctrl_shadow variable.
1029 /* On non-FSGSBASE systems, conditionally do SWAPGS */
1078 movl %ecx, %eax /* zero extend */
1153 * stack of the previous NMI. NMI handlers are not re-entrant
1191 testb $3, CS-RIP+8(%rsp)
1211 pushq 5*8(%rdx) /* pt_regs->ss */
1212 pushq 4*8(%rdx) /* pt_regs->rsp */
1213 pushq 3*8(%rdx) /* pt_regs->flags */
1214 pushq 2*8(%rdx) /* pt_regs->cs */
1215 pushq 1*8(%rdx) /* pt_regs->rip */
1217 pushq $-1 /* pt_regs->orig_ax */
1226 * due to nesting -- we're on the normal thread stack and we're
1231 movq $-1, %rsi
1243-1267 * +---------------------------------------------------------+  (separator rows of the nested-NMI stack-layout diagram; the labelled rows did not match)
1269 * The "original" frame is used by hardware. Before re-enabling
1306 cmpl $1, -8(%rsp)
1345 leaq -10*8(%rsp), %rdx
1421 pushq -6*8(%rsp)
1432 pushq $-1 /* ORIG_RAX: no syscall to restart */
1445 movq $-1, %rsi
1468 /* EBX == 0 -> invoke SWAPGS */
1507 * This handles SYSCALL from 32-bit code. There is no way to program
1508 * MSRs to fully disable 32-bit SYSCALL.
1513 mov $-ENOSYS, %eax
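Note: since 32-bit SYSCALL cannot be fully disabled via MSRs, the ignore_sysret stub simply hands back the -ENOSYS loaded into %eax above and returns. A trivial, illustrative C statement of that behaviour:

    /* Any 32-bit SYSCALL on a kernel without IA32 emulation just gets -ENOSYS. */
    static long ignored_32bit_syscall(void)
    {
        return -38;    /* -ENOSYS */
    }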
1525 leaq -PTREGS_SIZE(%rax), %rsp