1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #ifndef _ASM_X86_CPU_ENTRY_AREA_H
4 #define _ASM_X86_CPU_ENTRY_AREA_H
5
6 #include <linux/percpu-defs.h>
7 #include <asm/processor.h>
8 #include <asm/intel_ds.h>
9
10 #ifdef CONFIG_X86_64
11
/*
 * Macro to enforce the same ordering and stack sizes in both the physical
 * backing store (struct exception_stacks) and the guarded cpu entry area
 * mapping (struct cea_exception_stacks).  The member order here must match
 * enum exception_stack_ordering below.
 *
 * @guardsize:	  size of the guard region placed before each stack
 *		  (0 for the backing store, PAGE_SIZE for the mapping)
 * @db2_holesize: size of the DB2 slot — 0 in the backing store (no real
 *		  storage), EXCEPTION_STKSZ in the mapping so the layout
 *		  keeps a hole at that position
 */
#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
	char	DF_stack_guard[guardsize];	\
	char	DF_stack[EXCEPTION_STKSZ];	\
	char	NMI_stack_guard[guardsize];	\
	char	NMI_stack[EXCEPTION_STKSZ];	\
	char	DB2_stack_guard[guardsize];	\
	char	DB2_stack[db2_holesize];	\
	char	DB1_stack_guard[guardsize];	\
	char	DB1_stack[EXCEPTION_STKSZ];	\
	char	DB_stack_guard[guardsize];	\
	char	DB_stack[EXCEPTION_STKSZ];	\
	char	MCE_stack_guard[guardsize];	\
	char	MCE_stack[EXCEPTION_STKSZ];	\
	char	IST_top_guard[guardsize];	\
/*
 * The exception stacks' physical storage. No guard pages required, hence
 * guardsize == 0 and no storage for the DB2 hole (db2_holesize == 0).
 */
struct exception_stacks {
	ESTACKS_MEMBERS(0, 0)
};
32
/*
 * The effective cpu entry area mapping with guard pages: each stack is
 * preceded by a PAGE_SIZE guard, and the DB2 slot is an EXCEPTION_STKSZ
 * sized hole rather than real storage.
 */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
37
/*
 * The exception stack ordering in [cea_]exception_stacks.
 * Must stay in sync with the member order laid down by ESTACKS_MEMBERS.
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB2,
	ESTACK_DB1,
	ESTACK_DB,
	ESTACK_MCE,
	N_EXCEPTION_STACKS	/* number of real exception stacks */
};
50
/* Size in bytes of exception stack @st, e.g. CEA_ESTACK_SIZE(DF) */
#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

/* Lowest address of stack @st within the mapping @ceastp */
#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

/* One past the highest address of stack @st (x86 stacks grow down) */
#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

/* Byte offset of stack @st from the start of cea_exception_stacks */
#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

/* Number of pages occupied by the guarded stacks mapping */
#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
65
66 #endif
67
/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	/* The CPU's GDT; exactly one page */
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
#endif
};
110
/* Size of one CPU's entry area, and of all NR_CPUS areas combined */
#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Per CPU pointers to this CPU's entry area and guarded exception stacks */
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

/* The read-only IDT occupies the first page of the entry area region */
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
/* Per CPU areas start one page after the RO IDT */
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

/* Total virtual size of the region: RO IDT page + all per CPU areas */
#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
129
cpu_entry_stack(int cpu)130 static inline struct entry_stack *cpu_entry_stack(int cpu)
131 {
132 return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
133 }
134
/*
 * Top address of IST stack @name in the current CPU's guarded exception
 * stacks mapping.  @name is one of DF, NMI, DB2, DB1, DB, MCE.
 */
#define __this_cpu_ist_top_va(name)					\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
137
138 #endif
139