Lines matching "esp" in arch/x86/entry/entry_32.S:

13 *	 0(%esp) - %ebx
14 *	 4(%esp) - %ecx
15 *	 8(%esp) - %edx
16 *	 C(%esp) - %esi
17 *	10(%esp) - %edi
18 *	14(%esp) - %ebp
19 *	18(%esp) - %eax
20 *	1C(%esp) - %ds
21 *	20(%esp) - %es
22 *	24(%esp) - %fs
23 *	28(%esp) - unused -- was %gs on old stackprotector kernels
24 *	2C(%esp) - orig_eax
25 *	30(%esp) - %eip
26 *	34(%esp) - %cs
27 *	38(%esp) - %eflags
28 *	3C(%esp) - %oldesp
29 *	40(%esp) - %oldss
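
The table above is the pt_regs save layout on the 32-bit kernel stack. A minimal C sketch of the matching structure (field names follow the kernel's 32-bit struct pt_regs; plain uint32_t fields stand in for the real types, so treat this as illustrative):

#include <stdint.h>

struct pt_regs_sketch {
	uint32_t bx;		/* 0x00 */
	uint32_t cx;		/* 0x04 */
	uint32_t dx;		/* 0x08 */
	uint32_t si;		/* 0x0C */
	uint32_t di;		/* 0x10 */
	uint32_t bp;		/* 0x14 */
	uint32_t ax;		/* 0x18 */
	uint32_t ds;		/* 0x1C */
	uint32_t es;		/* 0x20 */
	uint32_t fs;		/* 0x24 */
	uint32_t gs;		/* 0x28, unused slot on current kernels */
	uint32_t orig_ax;	/* 0x2C, syscall nr / vector / error code */
	uint32_t ip;		/* 0x30 */
	uint32_t cs;		/* 0x34 */
	uint32_t flags;		/* 0x38 */
	uint32_t sp;		/* 0x3C, pushed by hardware on CPL change */
	uint32_t ss;		/* 0x40, pushed by hardware on CPL change */
};
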
71 testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
111 andl $0x0000ffff, 4*4(%esp)
114 testl $X86_EFLAGS_VM, 5*4(%esp)
117 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
120 orl $CS_FROM_KERNEL, 4*4(%esp)
125 * 6*4(%esp) - <previous context>
126 * 5*4(%esp) - flags
127 * 4*4(%esp) - cs
128 * 3*4(%esp) - ip
129 * 2*4(%esp) - orig_eax
130 * 1*4(%esp) - gs / function
131 * 0*4(%esp) - fs
137 * 14*4(%esp) - <previous context>
138 * 13*4(%esp) - gap / flags
139 * 12*4(%esp) - gap / cs
140 * 11*4(%esp) - gap / ip
141 * 10*4(%esp) - gap / orig_eax
142 * 9*4(%esp) - gap / gs / function
143 * 8*4(%esp) - gap / fs
144 * 7*4(%esp) - ss
145 * 6*4(%esp) - sp
146 * 5*4(%esp) - flags
147 * 4*4(%esp) - cs
148 * 3*4(%esp) - ip
149 * 2*4(%esp) - orig_eax
150 * 1*4(%esp) - gs / function
151 * 0*4(%esp) - fs
155 pushl %esp # sp (points at ss)
156 addl $7*4, (%esp) # point sp back at the previous context
157 pushl 7*4(%esp) # flags
158 pushl 7*4(%esp) # cs
159 pushl 7*4(%esp) # ip
160 pushl 7*4(%esp) # orig_eax
161 pushl 7*4(%esp) # gs / function
162 pushl 7*4(%esp) # fs
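
Why the displacement can stay 7*4 for all six copies: a push computes its memory source address with the pre-decrement %esp, and each push then lowers %esp by one slot, so the same constant offset lands one slot higher in the original frame every time. A hypothetical user-space model (array-as-stack, purely illustrative):

#include <stdio.h>
#include <stdint.h>

#define SLOTS 32
static uint32_t stack[SLOTS];
static int esp;				/* index of the current stack top */

static void pushl_mem(int disp_words)
{
	uint32_t val = stack[esp + disp_words];	/* address uses old %esp */
	stack[--esp] = val;		/* then the slot below becomes top */
}

int main(void)
{
	const char *name[] = { "fs", "gs", "orig_eax", "ip", "cs", "flags" };

	/* frame as left by "pushl %ss; pushl %esp; addl $7*4, (%esp)":
	 * slot 0 = sp, slot 1 = ss, slots 2..7 = original fs..flags */
	esp = SLOTS - 8;
	for (int i = 0; i < 6; i++)
		stack[esp + 2 + i] = 0x100 + i;

	for (int i = 0; i < 6; i++)
		pushl_mem(7);	/* copies flags, cs, ip, orig_eax, gs, fs */

	for (int i = 0; i < 6; i++)
		printf("%-8s = 0x%x\n", name[i], stack[esp + i]);
	return 0;
}
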
170 * mode and therefore have a nonzero SS base and an offset ESP,
172 * accesses through %esp, which automatically use SS.)
174 testl $CS_FROM_KERNEL, 1*4(%esp)
179 * regs->sp without lowering %esp in between, such that an NMI in the
184 movl 5*4(%esp), %eax # (modified) regs->sp
186 movl 4*4(%esp), %ecx # flags
189 movl 3*4(%esp), %ecx # cs
193 movl 2*4(%esp), %ecx # ip
196 movl 1*4(%esp), %ecx # eax
200 lea -4*4(%eax), %esp
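
A hedged C sketch of the sequence above (names are hypothetical): the four return words are written below regs->sp through a scratch pointer (%eax) first, and %esp is moved only once the frame is complete, so an NMI arriving mid-sequence never observes a half-built stack.

#include <stdint.h>

static uint32_t *build_return_frame(uint32_t *regs_sp, uint32_t flags,
				    uint32_t cs, uint32_t ip, uint32_t ax)
{
	regs_sp[-1] = flags;	/* stores go through %eax, not %esp */
	regs_sp[-2] = cs;
	regs_sp[-3] = ip;
	regs_sp[-4] = ax;
	return regs_sp - 4;	/* lea -4*4(%eax), %esp happens last */
}
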
271 4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */
312 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
314 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
318 movb PT_OLDSS(%esp), %ah
319 movb PT_CS(%esp), %al
328 * restore the high word of ESP for us on executing iret... This is an
331 * high word of ESP with the high word of the userspace ESP while
335 mov %esp, %edx /* load kernel esp */
336 mov PT_OLDESP(%esp), %eax /* load userspace esp */
337 mov %dx, %ax /* eax: new kernel esp */
343 pushl %eax /* new kernel esp */
350 lss (%esp), %esp /* switch to espfix segment */
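
The three mov's above amount to a 16-bit splice; a sketch mirroring the asm (illustrative, not the kernel's actual helper):

#include <stdint.h>

static uint32_t espfix_new_esp(uint32_t kernel_esp, uint32_t user_esp,
			       uint32_t *base_offset)
{
	/* keep the kernel frame's low word, take the user's high word,
	 * so the high word a 16-bit-SS iret leaks is the user's own */
	uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0xffffu);

	/* the espfix segment base is biased by the difference, so that
	 * base + new_esp still addresses the real kernel iret frame */
	*base_offset = kernel_esp - new_esp;	/* low word is zero */
	return new_esp;
}
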
359 * We need to be very careful here with the %esp switch, because an NMI
380 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
385 movl %esp, %esi
397 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
398 movb PT_CS(%esp), %cl
401 movl PT_CS(%esp), %ecx
427 movl %edi, %esp
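
A hedged C model of the entry-stack check and copy these scattered lines belong to (names hypothetical; the kernel copies with cld; rep movsl through %esi/%edi, and VM86's four extra segment slots are omitted here):

#include <stdint.h>
#include <string.h>

#define PTREGS_SIZE (17 * 4)	/* the layout at the top of this listing */

static uint8_t *switch_to_task_stack(uint8_t *esp, uint8_t *entry_stack_end,
				     size_t entry_stack_size,
				     uint8_t *task_stack_top)
{
	/* the subl above: distance from %esp up to the end of the entry
	 * stack; if it exceeds the stack's size, we were not on the
	 * entry stack and nothing needs to move */
	if ((size_t)(entry_stack_end - esp) >= entry_stack_size)
		return esp;

	/* allocate a frame on the task stack and duplicate pt_regs */
	uint8_t *dst = task_stack_top - PTREGS_SIZE;

	memcpy(dst, esp, PTREGS_SIZE);
	return dst;		/* becomes %esp: movl %edi, %esp */
}
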
443 * kernel-mode and %esp points to the entry-stack. When this
464 * %esi: Entry-Stack pointer (same as %esp)
480 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
488 orl $CS_FROM_USER_CR3, PT_CS(%esp)
503 * The %esp register must point to pt_regs on the task stack. It will
520 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
532 movl %esp, %esi
547 movl %ebx, %esp
566 testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
572 andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
575 movl %esp, %esi
598 movl %ebx, %esp
604 testl $CS_FROM_USER_CR3, PT_CS(%esp)
608 andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
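
The saved CS is only 16 bits wide, so the high bits of its 32-bit pt_regs slot are free for bookkeeping; the CS_FROM_* values below follow the defines at the top of entry_32.S. A sketch of the or/test/and pattern used in these lines:

#include <stdint.h>

#define CS_FROM_ENTRY_STACK	(1u << 31)
#define CS_FROM_USER_CR3	(1u << 30)
#define CS_FROM_KERNEL		(1u << 29)
#define CS_FROM_ESPFIX		(1u << 28)

static void mark_cs(uint32_t *pt_cs, uint32_t flag)
{
	*pt_cs |= flag;			/* orl $..., PT_CS(%esp) */
}

static int test_and_clear_cs(uint32_t *pt_cs, uint32_t flag)
{
	int was_set = (*pt_cs & flag) != 0;	/* testl $..., PT_CS(%esp) */

	*pt_cs &= ~flag;			/* andl $(~...), PT_CS(%esp) */
	return was_set;
}
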
644 movl %esp, %eax
645 movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */
646 movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */
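
A hypothetical C-side view of this dispatch (names invented for illustration): %eax carries the pt_regs pointer, the vector comes out of the orig_eax slot, and the slot is poisoned with -1 so later signal code cannot mistake the frame for a restartable syscall:

#include <stdint.h>

struct pt_regs;		/* laid out as in the table near the top */

static void dispatch_vector(struct pt_regs *regs, uint32_t *orig_eax_slot,
			    void (*handler)(struct pt_regs *, uint32_t))
{
	uint32_t vector = *orig_eax_slot;

	*orig_eax_slot = (uint32_t)-1;	/* no syscall to restart */
	handler(regs, vector);
}
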
693 movl %esp, TASK_threadsp(%eax)
694 movl TASK_threadsp(%edx), %esp
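
These two lines are the entire stack switch of a context switch; a minimal model (thread_sp stands in for the thread.sp field that TASK_threadsp resolves to):

struct task {
	unsigned long thread_sp;
};

static unsigned long switch_stacks(struct task *prev, struct task *next,
				   unsigned long esp)
{
	prev->thread_sp = esp;		/* movl %esp, TASK_threadsp(%eax) */
	return next->thread_sp;		/* movl TASK_threadsp(%edx), %esp */
}
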
758 movl %esp, %eax
770 movl $0, PT_EAX(%esp)
798 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
801 * and does not save old EIP (!!!), ESP, or EFLAGS.
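
The "previously programmed MSRs" are the architectural SYSENTER triplet (0x174-0x176). A sketch of programming them at boot; wrmsr() is written out as the usual inline-asm wrapper:

#include <stdint.h>

#define MSR_IA32_SYSENTER_CS	0x174	/* kernel %cs; %ss becomes %cs + 8 */
#define MSR_IA32_SYSENTER_ESP	0x175	/* stack pointer loaded on entry */
#define MSR_IA32_SYSENTER_EIP	0x176	/* kernel entry point */

static void wrmsr(uint32_t msr, uint32_t lo, uint32_t hi)
{
	__asm__ volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi));
}

static void setup_sysenter(uint32_t kernel_cs, uint32_t esp, uint32_t eip)
{
	wrmsr(MSR_IA32_SYSENTER_CS, kernel_cs, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, esp, 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, eip, 0);
}
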
831 movl TSS_entry2task_stack(%esp), %esp
861 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
865 movl %esp, %eax
884 movl PT_EFLAGS(%esp), %edi
885 movl PT_EAX(%esp), %esi
890 movl PT_EIP(%esp), %edx /* pt_regs->ip */
891 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
892 1: mov PT_FS(%esp), %fs
895 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
901 movl %eax, %esp
911 btrl $X86_EFLAGS_IF_BIT, (%esp)
923 2: movl $0, PT_FS(%esp)
968 movl %esp, %eax
1019 * normal stack and adjusts ESP with the matching offset.
1028 subl $2*4, %esp
1029 sgdt (%esp)
1030 movl 2(%esp), %ecx /* GDT address */
1038 addl $2*4, %esp
1040 addl %esp, %eax /* the adjusted stack pointer */
1043 lss (%esp), %esp /* switch to the normal stack segment */
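
The sgdt sequence works because SGDT stores a 6-byte pseudo-descriptor, a 16-bit limit followed by the 32-bit linear base, which is why the base is read at offset 2 of the two slots carved off the stack. A sketch:

#include <stdint.h>

struct gdt_ptr {
	uint16_t limit;
	uint32_t base;
} __attribute__((packed));

static uint32_t read_gdt_base(void)
{
	struct gdt_ptr gdtr;	/* plays the role of the 2*4(%esp) scratch */

	__asm__ volatile("sgdt %0" : "=m"(gdtr));
	return gdtr.base;	/* what movl 2(%esp), %ecx picks up */
}
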
1065 movl PT_GS(%esp), %edi # get the function address
1068 movl PT_ORIG_EAX(%esp), %edx # get the error code
1069 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1071 movl %esp, %eax # pt_regs pointer
1076 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
1077 movb PT_CS(%esp), %al
1083 movl PT_CS(%esp), %eax
1095 movl %esp, %eax
1165 movl %esp, %eax # pt_regs pointer
1170 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1183 movl %esp, %ebx
1184 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
1186 movl %ebx, %esp
1190 testl $CS_FROM_ESPFIX, PT_CS(%esp)
1204 pushl %esp
1205 addl $4, (%esp)
1208 pushl 4*4(%esp) # flags
1209 pushl 4*4(%esp) # cs
1210 pushl 4*4(%esp) # ip
1218 xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1221 movl %esp, %eax # pt_regs pointer
1236 lss (1+5+6)*4(%esp), %esp # back to espfix stack
1247 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
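
A sketch of what this final leal computes (sizes illustrative: PTREGS_SIZE is the 17 slots of the layout at the top of this listing, 0x44 bytes): with %esi holding the top of the task stack, pt_regs sits immediately below the configurable padding.

#include <stddef.h>

static char *pt_regs_on_task_stack(char *task_stack_top, size_t padding)
{
	return task_stack_top - padding - 17 * 4;	/* - PTREGS_SIZE */
}
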