/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <offsets_short.h>
#include <x86_mmu.h>

extern void x86_sse_init(struct k_thread *thread); /* in locore.S */

/* FIXME: This exists to make space for a "return address" at the top
 * of the stack. Obviously this is unused at runtime, but is required
 * for alignment: stacks at runtime should be 16-byte aligned, and a
 * CALL will therefore push a return address that leaves the stack
 * misaligned. Effectively we're wasting 8 bytes here to undo (!) the
 * alignment that the upper level code already tried to do for us. We
 * should clean this up.
 */
struct x86_initial_frame {
	/* zeroed return address for ABI */
	uint64_t rip;
};

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	void *switch_entry;
	struct x86_initial_frame *iframe;

#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
	/* This unconditionally sets the first page of the stack as a guard
	 * page, which is only needed if the stack is not memory mapped.
	 */
	z_x86_set_stack_guard(stack);
#else
	ARG_UNUSED(stack);
#endif
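	/* Select the address that the first switch into this thread will
	 * jump to: determined by the userspace preparation code when
	 * CONFIG_USERSPACE is enabled, otherwise z_thread_entry() directly.
	 */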
#ifdef CONFIG_USERSPACE
	switch_entry = z_x86_userspace_prepare_thread(thread);
	thread->arch.cs = X86_KERNEL_CS;
	thread->arch.ss = X86_KERNEL_DS;
#else
	switch_entry = z_thread_entry;
#endif
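	/* Carve the dummy return-address frame out of the top of the stack
	 * and record the initial register context that the first switch
	 * into this thread will restore.
	 */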
	iframe = Z_STACK_PTR_TO_FRAME(struct x86_initial_frame, stack_ptr);
	iframe->rip = 0U;
	thread->callee_saved.rsp = (long) iframe;
	thread->callee_saved.rip = (long) switch_entry;
	thread->callee_saved.rflags = EFLAGS_INITIAL;

	/* Arguments to the entry point whose address was stored in
	 * thread->callee_saved.rip above; these are the first four
	 * integer argument registers of the System V AMD64 calling
	 * convention.
	 */
	thread->arch.rdi = (long) entry;
	thread->arch.rsi = (long) p1;
	thread->arch.rdx = (long) p2;
	thread->arch.rcx = (long) p3;

	x86_sse_init(thread);

	thread->arch.flags = X86_THREAD_FLAG_ALL;
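	/* On x86-64 the thread pointer itself serves as the switch handle;
	 * setting it marks the initial context as ready to be switched to.
	 */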
	thread->switch_handle = thread;
}

int arch_float_disable(struct k_thread *thread)
{
	/* x86-64 always has FP/SSE enabled, so it cannot be disabled */
	ARG_UNUSED(thread);

	return -ENOTSUP;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* x86-64 always has FP/SSE enabled, so there is nothing to do here */
	ARG_UNUSED(thread);
	ARG_UNUSED(options);

	return 0;
}