/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
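
/*
 * For illustration (the values follow directly from the definitions
 * above): the soft mask is a single byte in paca->irq_soft_mask, and the
 * states simply OR together:
 *
 *	IRQS_ENABLED      = 0x0	everything may be taken
 *	IRQS_DISABLED     = 0x1	after local_irq_disable()
 *	IRQS_ALL_DISABLED = 0x3	after powerpc_local_irq_pmu_save() (Book3S),
 *				which additionally masks PMIs
 */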

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * because interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

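/*
 * Usage sketch (for illustration): these arch_* hooks are not normally
 * called directly; the generic wrappers in include/linux/irqflags.h map
 * onto them, roughly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		ends up in arch_local_irq_save()
 *	... soft-masked critical section ...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore(flags)
 */
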
#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of interrupts together with PMIs,
 * a new pair of macros, powerpc_local_irq_pmu_save() and
 * powerpc_local_irq_pmu_restore(), is provided. These macros are
 * implemented along the lines of the generic local_irq_* code in
 * include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	 do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */
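
/*
 * Usage sketch (illustrative only): powerpc_local_irq_pmu_save() and
 * powerpc_local_irq_pmu_restore() are used in matched pairs, much like
 * local_irq_save()/local_irq_restore(), around code that must not be
 * interrupted even by performance monitor interrupts:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... touch PMU / perf event state ...
 *	powerpc_local_irq_pmu_restore(flags);
 */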

#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	wrtee(MSR_EE)
#define __hard_irq_disable()	wrtee(0)
#define __hard_EE_RI_disable()	wrtee(0)
#define __hard_RI_enable()	do { } while (0)
#else
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
#endif

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}
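
/*
 * The lazy-masking flow, roughly (summarising the PACA_IRQ_* comment
 * above): an interrupt taken while soft-disabled records itself in
 * paca->irq_happened and returns without running its real handler,
 * staying hard disabled if it is in PACA_IRQ_MUST_HARD_MASK. When the
 * soft mask is later lifted, arch_local_irq_restore() notices
 * lazy_irq_pending() and replays the recorded interrupts
 * (replay_soft_interrupts()).
 */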

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void arch_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */