// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

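/*
 * Read local_paca->irq_happened without get_paca(). On ppc64 the
 * per-CPU paca pointer lives in r13, so the lbz below is equivalent to
 * the C statement
 *
 *        happened = local_paca->irq_happened;
 *
 * open-coded here so this notrace helper avoids the
 * debug_smp_processor_id() business that get_paca() can involve.
 */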
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

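/*
 * Replay interrupts that were noted in paca->irq_happened while the CPU
 * was soft-masked. A simplified sketch of the other half of the
 * protocol (the real masked-interrupt entry path lives in the exception
 * asm):
 *
 *        // hardware interrupt arrives while soft-masked:
 *        local_paca->irq_happened |= PACA_IRQ_EE;  // note the event
 *        // hard-disable MSR[EE] and return from the exception
 *
 * Each noted bit is turned back into a normal handler invocation below
 * once the soft mask is lifted.
 */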
void replay_soft_interrupts(void)
{
        struct pt_regs regs;

        /*
         * Be careful here, calling these interrupt handlers can cause
         * softirqs to be raised, which they may run when calling irq_exit,
         * which will cause local_irq_enable() to be run, which can then
         * recurse into this function. Don't keep any state across
         * interrupt handler calls which may change underneath us.
         *
         * We use local_paca rather than get_paca() to avoid all the
         * debug_smp_processor_id() business in this low level function.
         */

        ppc_save_regs(&regs);
        regs.softe = IRQS_ENABLED;

again:
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(mfmsr() & MSR_EE);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a Hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                regs.trap = INTERRUPT_HMI;
                handle_hmi_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                regs.trap = INTERRUPT_DECREMENTER;
                timer_interrupt(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                regs.trap = INTERRUPT_EXTERNAL;
                do_IRQ(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                regs.trap = INTERRUPT_DOORBELL;
                doorbell_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        /* Book3E does not support soft-masking PMI interrupts */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                regs.trap = INTERRUPT_PERFMON;
                performance_monitor_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
                /*
                 * We are responding to the next interrupt, so interrupt-off
                 * latencies should be reset here.
                 */
                trace_hardirqs_on();
                trace_hardirqs_off();
                goto again;
        }
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
        unsigned long kuap_state = get_kuap();

        /*
         * Check if anything calls local_irq_enable/restore() when KUAP is
         * disabled (user access enabled). We handle that case here by saving
         * and re-locking AMR but we shouldn't get here in the first place,
         * hence the warning.
         */
        kuap_assert_locked();

        if (kuap_state != AMR_KUAP_BLOCKED)
                set_kuap(AMR_KUAP_BLOCKED);

        replay_soft_interrupts();

        if (kuap_state != AMR_KUAP_BLOCKED)
                set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

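/*
 * Slow path of the lazy interrupt-disable scheme. A minimal sketch of
 * how we get here (illustrative; the fast-path helpers live in
 * arch/powerpc/include/asm/hw_irq.h):
 *
 *        local_irq_disable();  // just sets paca->irq_soft_mask
 *        ...                   // a hardware interrupt arriving here is
 *                              // noted in paca->irq_happened, the CPU
 *                              // hard-disabled, the handler deferred
 *        local_irq_enable();   // ends up in
 *                              // arch_local_irq_restore(IRQS_ENABLED),
 *                              // which replays the deferred interrupts
 */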
notrace void arch_local_irq_restore(unsigned long mask)
{
        unsigned char irq_happened;

        /* Write the new soft-enabled value */
        irq_soft_mask_set(mask);
        if (mask)
                return;

        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have been preempted before the check below, in which
         * case we are checking the "new" CPU instead of the old one.
         * This is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have been preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened) {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                        WARN_ON_ONCE(!(mfmsr() & MSR_EE));
                return;
        }

        /* We need to hard disable to replay. */
        if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                        WARN_ON_ONCE(!(mfmsr() & MSR_EE));
                __hard_irq_disable();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
        } else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
                        if (WARN_ON_ONCE(mfmsr() & MSR_EE))
                                __hard_irq_disable();
                }

                if (irq_happened == PACA_IRQ_HARD_DIS) {
                        local_paca->irq_happened = 0;
                        __hard_irq_enable();
                        return;
                }
        }

        /*
         * Disable preempt here, so that the below preempt_enable will
         * perform resched if required (a replayed interrupt may set
         * need_resched).
         */
        preempt_disable();
        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();

        replay_soft_interrupts_irqrestore();
        local_paca->irq_happened = 0;

        trace_hardirqs_on();
        irq_soft_mask_set(IRQS_ENABLED);
        __hard_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is a helper to use when about to enter an idle low-power state
 * that has the side effect of re-enabling interrupts (such as calling
 * H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        irq_soft_mask_set(IRQS_ENABLED);

        /* Tell the caller to enter the low power state */
        return true;
}
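
/*
 * Example caller shape for prep_irq_for_idle() (an illustrative sketch,
 * not any particular platform's code; enter_low_power() is a
 * hypothetical name):
 *
 *        if (prep_irq_for_idle()) {
 *                enter_low_power();  // may hard-enable interrupts,
 *                                    // e.g. H_CEDE under pHyp
 *        }
 *        // on a false return, interrupts are left hard disabled and
 *        // marked as such, for arch_cpu_idle() to re-enable normally
 */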

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET        0xff

static const u8 srr1_to_lazyirq[0x10] = {
        0, 0, 0,
        PACA_IRQ_DBELL,
        IRQ_SYSTEM_RESET,
        PACA_IRQ_DBELL,
        PACA_IRQ_DEC,
        0,
        PACA_IRQ_EE,
        PACA_IRQ_EE,
        PACA_IRQ_HMI,
        0, 0, 0, 0, 0 };
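
/*
 * For reference, the non-zero entries above decode these SRR1[42:45]
 * wake reasons:
 *        0x3, 0x5 -> PACA_IRQ_DBELL      (doorbell)
 *        0x4      -> IRQ_SYSTEM_RESET    (NMI, replayed immediately)
 *        0x6      -> PACA_IRQ_DEC        (decrementer)
 *        0x8, 0x9 -> PACA_IRQ_EE         (external interrupt)
 *        0xa      -> PACA_IRQ_HMI        (hypervisor maintenance)
 */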

void replay_system_reset(void)
{
        struct pt_regs regs;

        ppc_save_regs(&regs);
        regs.trap = 0x100;
        get_paca()->in_nmi = 1;
        system_reset_exception(&regs);
        get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
        unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
        u8 reason = srr1_to_lazyirq[idx];

        /*
         * Take the system reset now, which is immediately after registers
         * are restored from idle. It's an NMI, so interrupts need not be
         * re-enabled before it is taken.
         */
        if (unlikely(reason == IRQ_SYSTEM_RESET)) {
                replay_system_reset();
                return;
        }

        if (reason == PACA_IRQ_DBELL) {
                /*
                 * When a doorbell triggers a system reset wakeup, the message
                 * is not cleared, so if the doorbell interrupt is replayed
                 * and the IPI handled, the doorbell interrupt would still
                 * fire when EE is enabled.
                 *
                 * To avoid taking the superfluous doorbell interrupt,
                 * execute a msgclr here before the interrupt is replayed.
                 */
                ppc_msgclr(PPC_DBELL_MSGTYPE);
        }

        /*
         * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
         * so this can be called unconditionally with the SRR1 wake
         * reason as returned by the idle code, which uses 0 to mean no
         * interrupt.
         *
         * If a future CPU was to designate this as an interrupt reason,
         * then a new index for no interrupt must be assigned.
         */
        local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /*
         * Interrupts must always be hard disabled before irq_happened is
         * modified (to prevent lost update in case of interrupt between
         * load and store).
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "BCT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
        seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }
#endif

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
        seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
        seq_printf(p, "%*s: ", prec, "WDG");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
        seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}
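
/*
 * The rows above are appended to /proc/interrupts. Roughly, on a
 * two-CPU system (counts invented for illustration):
 *
 *        LOC:    123456    654321   Local timer interrupts for timer event device
 *        SPU:         0         2   Spurious interrupts
 *        PMI:        17         9   Performance monitoring interrupts
 */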

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
        sum += paca_ptrs[cpu]->hmi_irqs;
#endif
        sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
        sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

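/*
 * Kernel stacks are THREAD_SIZE-aligned and grow downward, so masking
 * the stack pointer with (THREAD_SIZE - 1) below yields the number of
 * bytes still free. For example, with THREAD_SIZE = 16KB and
 * r1 = 0xc000000012341000, 0x1000 bytes (4KB) of stack remain.
 */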
static inline void check_stack_overflow(void)
{
        long sp;

        if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
                return;

        sp = current_stack_pointer & (THREAD_SIZE - 1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < 2048)) {
                pr_err("do_IRQ: stack overflow: %ld\n", sp);
                dump_stack();
        }
}

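/*
 * The two helpers below switch to a dedicated stack around a call. The
 * asm pattern: the store-with-update (PPC_STLU) writes the old r1 near
 * the top of the new stack and advances the address register, "mr"
 * makes that address the new r1 (so word 0 of the new frame is a valid
 * back chain), and after the call PPC_LL restores the old r1 through
 * that back chain.
 */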
static __always_inline void call_do_softirq(const void *sp)
{
        /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
        asm volatile (
                PPC_STLU " %%r1, %[offset](%[sp]) ;"
                "mr %%r1, %[sp] ;"
                "bl %[callee] ;"
                PPC_LL " %%r1, 0(%%r1) ;"
                : // Outputs
                : // Inputs
                  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
                  [callee] "i" (__do_softirq)
                : // Clobbers
                  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
                  "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
                  "r11", "r12"
        );
}

static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
        register unsigned long r3 asm("r3") = (unsigned long)regs;

        /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
        asm volatile (
                PPC_STLU " %%r1, %[offset](%[sp]) ;"
                "mr %%r1, %[sp] ;"
                "bl %[callee] ;"
                PPC_LL " %%r1, 0(%%r1) ;"
                : // Outputs
                  "+r" (r3)
                : // Inputs
                  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
                  [callee] "i" (__do_irq)
                : // Clobbers
                  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
                  "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
                  "r11", "r12"
        );
}

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        trace_irq_entry(regs);

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;

        /* Switch to the irq stack to handle this */
        cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
        irqsp = hardirq_ctx[raw_smp_processor_id()];
        sirqsp = softirq_ctx[raw_smp_processor_id()];

        check_stack_overflow();

        /* Already there ? */
        if (unlikely(cursp == irqsp || cursp == sirqsp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }
        /* Switch stack and call */
        call_do_irq(regs, irqsp);

        set_irq_regs(old_regs);
}

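/*
 * With CONFIG_VMAP_STACK the IRQ stacks come from vmalloc space, where
 * adjacent allocations are separated by unmapped guard pages, so an
 * overflowing IRQ stack faults instead of silently corrupting a
 * neighbouring allocation.
 */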
static void *__init alloc_vm_stack(void)
{
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
                              NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
        int i;

        for_each_possible_cpu(i) {
                softirq_ctx[i] = alloc_vm_stack();
                hardirq_ctx[i] = alloc_vm_stack();
        }
}

void __init init_IRQ(void)
{
        if (IS_ENABLED(CONFIG_VMAP_STACK))
                vmap_irqstack_init();

        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
        call_do_softirq(softirq_ctx[smp_processor_id()]);
}

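/*
 * "virq" is the Linux interrupt number; "hwirq" is the controller-local
 * number the irq domain mapped it from. This helper recovers the
 * latter, returning 0 for a virq that was never mapped.
 */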
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
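/*
 * Pick a hardware CPU id for an interrupt affinity mask: "all online
 * CPUs" requests are spread round-robin, otherwise the first online CPU
 * in the mask is used, falling back to round-robin (the goto jumps back
 * into the block above, which is legal C) when the mask contains no
 * online CPU.
 */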
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */