/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
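
/*
 * Per-CPU scratch slot used by the 64-bit SYSCALL entry code to stash the
 * user stack pointer while it switches to the kernel stack.
 */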
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
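
/*
 * There is no architecture-specific thread state to free on x86-64; just
 * sanity-check that a dead task has not been left holding an LDT.
 */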
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
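
/*
 * Save the outgoing task's FS/GS selectors and, where necessary, the
 * corresponding bases (see save_base_legacy() above).
 */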
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()).  KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif
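
/*
 * Load a new FS or GS selector.  GS goes through load_gs_index() so that
 * the kernel's per-CPU GS base is not clobbered by the selector write.
 */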
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
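
/*
 * Restore the incoming task's FS or GS selector and base on CPUs without
 * FSGSBASE, doing as few selector loads and MSR writes as possible.
 */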
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
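
/*
 * Set up the saved registers and thread state for a newly forked task so
 * that it resumes in ret_from_fork.  Kernel threads get a zeroed pt_regs
 * with the function and its argument stashed in callee-saved registers.
 */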
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
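
/*
 * Reset the register and segment state so that the current task starts
 * executing at new_ip with stack new_sp, using the given user code, stack
 * and data segments.  Shared by the 64-bit and compat start_thread paths.
 */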
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here.  Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}
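
/*
 * Called during exec of a native 64-bit binary to switch the task to the
 * 64-bit personality.
 */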
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: this overwrites the user's setup.  It should really use two
	 * bits, but 64-bit processes have always behaved this way, so it's
	 * not too bad.  The main problem is just that 32-bit children are
	 * affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_compat_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_compat_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
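
/*
 * Handle the 64-bit-only arch_prctl() options (FS/GS base get/set and vDSO
 * mapping); anything else falls through to do_arch_prctl_common() via the
 * syscall entry points below.
 *
 * Illustrative userspace usage (not part of this file; tls_block is simply
 * memory the caller allocated), e.g. pointing FS at a TLS block without a
 * real segment selector:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 */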
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (option) {
	case ARCH_SET_GS:
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = arg2;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs.
		 */
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = arg2;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
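
/*
 * Native arch_prctl(): try the 64-bit-only options first and fall back to
 * the options shared with 32-bit (such as ARCH_GET_CPUID/ARCH_SET_CPUID).
 */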
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
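
/*
 * Report the user-visible stack pointer of a task, as saved in its pt_regs
 * (used e.g. by /proc/<pid>/stat).
 */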
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}