/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
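
/*
 * Illustrative sketch (an assumption of the flow, not the actual masked
 * handler): when e.g. an external interrupt arrives while soft-disabled,
 * the low level exception code records it and hard disables, roughly:
 *
 *	local_paca->irq_happened |= PACA_IRQ_EE | PACA_IRQ_HARD_DIS;
 *	// return with MSR[EE] clear; arch_local_irq_restore(IRQS_ENABLED)
 *	// replays the recorded interrupt later.
 */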

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif
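
/*
 * Concretely (assumed rationale for PACA_IRQ_EE): an external interrupt
 * stays asserted at the interrupt controller until it is handled, so
 * re-enabling MSR[EE] before the replay would just re-take the same
 * exception immediately.
 */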

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
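
/*
 * For example (derived from the values above): local_irq_disable() sets
 * the mask to IRQS_DISABLED (0x1), and the PMU variants below OR in
 * IRQS_PMI_DISABLED as well, giving IRQS_ALL_DISABLED (0x3).
 */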

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
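
/*
 * These hooks back the generic local_irq_save()/local_irq_restore()
 * pattern from include/linux/irqflags.h; a minimal caller sketch:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// soft-disable, return old mask
 *	// critical section: interrupts are soft-masked here and get
 *	// replayed on restore
 *	arch_local_irq_restore(flags);
 */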

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of IRQs together with PMIs, a pair
 * of new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * functions is added. These macros are implemented using the generic
 * Linux local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */
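
/*
 * A minimal usage sketch (an assumed caller; e.g. perf-style code that
 * must keep PMIs out of its critical section as well):
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	// critical section: standard interrupts and PMIs both masked
 *	powerpc_local_irq_pmu_restore(flags);
 */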

#endif /* CONFIG_PPC_BOOK3S */

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while (0)
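
/*
 * Note that hard_irq_disable() clears MSR[EE] and fully soft-masks;
 * the usual (assumed) pairing is with local_irq_enable() rather than
 * __hard_irq_enable(), so that anything recorded in irq_happened while
 * disabled is replayed via arch_local_irq_restore().
 */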

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}
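
/*
 * A sketched (assumed) idle-entry style caller: hard disable, then bail
 * out of the sleep if an interrupt already arrived while soft-masked:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return false;	// don't enter the low power state
 */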

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */