/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all_kernel
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
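
/*
 * Note: CONFIG_X86_32_LAZY_GS is the !CONFIG_STACKPROTECTOR configuration.
 * In that case the kernel never touches %gs on entry/exit - it is switched
 * lazily at context-switch time - so PUSH_GS below only reserves the
 * pt_regs slot with a 0 and the remaining helpers collapse to no-ops.
 */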
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx

	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif

.endm

.macro SAVE_ALL_NMI cr3_reg:req
	SAVE_ALL

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3; the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original %ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov	%esp, %ebp
	andl	$0x7fffffff, %ebp
#endif
.endm
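
/*
 * Note: with the usual 3G/1G split, kernel stack addresses have the MSB
 * set, so a "frame pointer" with the MSB clear can never be a real saved
 * %ebp. The frame-pointer unwinder recognizes such a value and recovers
 * the pt_regs address simply by setting the MSB again.
 */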

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored in \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)

	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us when executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by switching to the ESPFIX segment,
	 * whose base address makes up for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm

/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen anywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

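/*
 * The two flags below are software-defined: the CPU only writes a 16-bit
 * selector into the CS slot of the iret frame, so the high half of that
 * dword is free (and cleared on entry just in case). SWITCH_TO_KERNEL_STACK
 * sets them when the kernel is entered from kernel mode on the entry-stack
 * and/or with user cr3; PARANOID_EXIT_TO_KERNEL_MODE tests and clears them
 * on the way back out.
 */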
#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)

.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/*
	 * The high bits of the CS dword (__csh) are used for
	 * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
	 * hardware didn't do this for us.
	 */
	andl	$(0x0000ffff), PT_CS(%esp)

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-Stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */

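	/*
	 * Unlike the entry-from-user path above, which copies a fixed
	 * PTREGS_SIZE frame, the code below copies everything between the
	 * current %esp and the top of the entry-stack, so nothing the
	 * interrupted kernel code had on the entry-stack is lost.
	 */
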
	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on the task-stack and copy everything over.
	 */
	jmp	.Lcopy_pt_regs_\@

.Lend_\@:
.endm

/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting the stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm

/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
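	/*
	 * In the slow-path below, the number of bytes to copy back is
	 * computed as cpu_tss_rw.x86_tss.sp1 - %esp; on 32-bit, sp1
	 * doubles as cpu_current_top_of_stack, i.e. the top of the
	 * task-stack where SWITCH_TO_KERNEL_STACK placed the copied
	 * frame on entry.
	 */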
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer; we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all_kernel
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all_kernel
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
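
/*
 * Note: the "previously programmed MSRs" are MSR_IA32_SYSENTER_CS,
 * MSR_IA32_SYSENTER_ESP and MSR_IA32_SYSENTER_EIP; SYSENTER takes CS from
 * the first (with SS being the next selector), %esp from the second and
 * %eip from the third.
 */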
ENTRY(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
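	/*
	 * Note: SYSEXIT takes the return %eip from %edx and the user %esp
	 * from %ecx (loaded from pt_regs above); CS and SS are derived from
	 * MSR_IA32_SYSENTER_CS (+16 and +24 respectively).
	 */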
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries. It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call. (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
	SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
	CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4		# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

restore_all_kernel:
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0			# no error code
	pushl	$do_iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame. We are on kernel-cr3 because we come here from
	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax		/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp		/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)		/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)		/* Adjust vector into the [-256, -1] range */

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)			\
ENTRY(name)						\
	ASM_CLAC;					\
	pushl	$~(nr);					\
	SAVE_ALL switch_stacks=1;			\
	ENCODE_FRAME_POINTER;				\
	TRACE_IRQS_OFF					\
	movl	%esp, %eax;				\
	call	fn;					\
	jmp	ret_from_intr;				\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
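/*
 * (entry_arch.h expands BUILD_INTERRUPT()/BUILD_INTERRUPT3() for the SMP
 * IPI and local-APIC vectors - e.g. reschedule, call-function and the
 * APIC timer/error/spurious interrupts.)
 */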
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
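/*
 * Note: the fixup handlers below (6:-9:) zero %eax and the offending
 * selector slot when one of the segment reloads faults, so if %eax is
 * still 1 after all four loads the original fault must have been the
 * IRET itself (Category 2).
 */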
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif /* CONFIG_HYPERV */

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp	common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	movl	$(__USER_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	SWITCH_TO_KERNEL_STACK
	ENCODE_FRAME_POINTER
	cld
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * Entry from sysenter is now handled in common_exception
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_debug
	jmp	common_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
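/*
 * Note: the "SYSENTER stack" check below computes whether the pt_regs
 * pointer (%eax == %esp) lies within the per-cpu entry stack in the
 * cpu_entry_area, i.e. whether the NMI hit the early entry code before
 * it had switched to the task stack; if so, do_nmi() is run on the task
 * stack instead.
 */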
ENTRY(nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp

.Lnmi_return:
	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create an SS:ESP pair on the stack so we can 'lss' back to the
	 * espfix stack when we are done.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_ALL_NMI cr3_reg=%edi
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
END(rewind_stack_do_exit)