1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _X86_IRQFLAGS_H_
3 #define _X86_IRQFLAGS_H_
4 
5 #include <asm/processor-flags.h>
6 
7 #ifndef __ASSEMBLY__
8 
9 #include <asm/nospec-branch.h>
10 
11 /* Provide __cpuidle; we can't safely include <linux/cpu.h> */
12 #define __cpuidle __section(".cpuidle.text")
13 
/*
 * Interrupt control:
 */
17 
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Read the current EFLAGS register with pushf/pop.
 *
 * Returns the raw EFLAGS word; callers typically test X86_EFLAGS_IF in it.
 *
 * NOTE(review): "extern inline" under the kernel's gnu_inline semantics
 * emits no out-of-line copy here -- a standalone definition is expected
 * elsewhere for address-taken uses; confirm against the build.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
37 
extern inline void native_restore_fl(unsigned long flags);
/*
 * Write @flags back into EFLAGS with push/popf, restoring the state saved
 * by native_save_fl().  popf rewrites the arithmetic flags too, hence the
 * "cc" clobber; the "memory" clobber keeps the compiler from moving memory
 * accesses across the interrupt-state change.
 */
extern inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
46 
/*
 * Disable hardware interrupts (clear EFLAGS.IF).  The "memory" clobber
 * doubles as a compiler barrier so accesses are not reordered past the cli.
 */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
51 
/*
 * Enable hardware interrupts (set EFLAGS.IF).  The "memory" clobber
 * doubles as a compiler barrier so accesses are not reordered past the sti.
 */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
56 
/*
 * Enable interrupts and halt in a single asm statement.  sti takes effect
 * only after the next instruction, so no interrupt can be delivered between
 * the sti and the hlt -- a pending wakeup interrupt reliably exits the halt.
 * Lives in .cpuidle.text (__cpuidle) so the idle loop is recognizable.
 */
static inline __cpuidle void native_safe_halt(void)
{
	/* Flush CPU buffers before going idle (MDS mitigation). */
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
62 
/*
 * Halt without touching EFLAGS.IF -- the caller is responsible for the
 * interrupt state (interrupts already on, or an intentional dead stop).
 */
static inline __cpuidle void native_halt(void)
{
	/* Flush CPU buffers before going idle (MDS mitigation). */
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
68 
69 #endif
70 
71 #ifdef CONFIG_PARAVIRT_XXL
72 #include <asm/paravirt.h>
73 #else
74 #ifndef __ASSEMBLY__
75 #include <linux/types.h>
76 
arch_local_save_flags(void)77 static __always_inline unsigned long arch_local_save_flags(void)
78 {
79 	return native_save_fl();
80 }
81 
arch_local_irq_restore(unsigned long flags)82 static __always_inline void arch_local_irq_restore(unsigned long flags)
83 {
84 	native_restore_fl(flags);
85 }
86 
arch_local_irq_disable(void)87 static __always_inline void arch_local_irq_disable(void)
88 {
89 	native_irq_disable();
90 }
91 
arch_local_irq_enable(void)92 static __always_inline void arch_local_irq_enable(void)
93 {
94 	native_irq_enable();
95 }
96 
97 /*
98  * Used in the idle loop; sti takes one instruction cycle
99  * to complete:
100  */
arch_safe_halt(void)101 static inline __cpuidle void arch_safe_halt(void)
102 {
103 	native_safe_halt();
104 }
105 
106 /*
107  * Used when interrupts are already enabled or to
108  * shutdown the processor:
109  */
halt(void)110 static inline __cpuidle void halt(void)
111 {
112 	native_halt();
113 }
114 
115 /*
116  * For spinlocks, etc:
117  */
arch_local_irq_save(void)118 static __always_inline unsigned long arch_local_irq_save(void)
119 {
120 	unsigned long flags = arch_local_save_flags();
121 	arch_local_irq_disable();
122 	return flags;
123 }
124 #else
125 
/* Assembly-side helpers, non-paravirt build: plain instructions. */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Save EFLAGS into %rax; only needed for entry-code debug checks. */
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif

#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define INTERRUPT_RETURN	jmp native_iret
/* Return to user space: swapgs restores the user GS base, then sysret. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
/* 32-bit has no swapgs/sysret dance; a plain iret suffices. */
#define INTERRUPT_RETURN		iret
#endif
157 
158 #endif /* __ASSEMBLY__ */
159 #endif /* CONFIG_PARAVIRT_XXL */
160 
161 #ifndef __ASSEMBLY__
arch_irqs_disabled_flags(unsigned long flags)162 static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
163 {
164 	return !(flags & X86_EFLAGS_IF);
165 }
166 
arch_irqs_disabled(void)167 static __always_inline int arch_irqs_disabled(void)
168 {
169 	unsigned long flags = arch_local_save_flags();
170 
171 	return arch_irqs_disabled_flags(flags);
172 }
173 #endif /* !__ASSEMBLY__ */
174 
175 #endif
176