// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
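
/*
 * Usage note (editor's sketch): forced threading is selected on the boot
 * command line, e.g.
 *
 *	linux ... threadirqs
 *
 * after which handlers not marked IRQF_NO_THREAD are run in threaded
 * context, similar in spirit to what CONFIG_PREEMPT_RT does by default.
 */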

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function
 * while holding a resource the IRQ handler may need, you will
 * deadlock. It does not take associated threaded handlers into
 * account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
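
/*
 * Example (editor's sketch, hypothetical caller): optimistically quiesce
 * an interrupt from atomic context. The return value must be checked,
 * because a threaded handler might still be running:
 *
 *	disable_irq_nosync(irq);
 *	if (!synchronize_hardirq(irq)) {
 *		enable_irq(irq);
 *		return false;	(threaded handler still active, retry later)
 *	}
 */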

static void __synchronize_irq(struct irq_desc *desc)
{
	__synchronize_hardirq(desc, true);
	/*
	 * We made sure that no hardirq handler is running. Now verify that no
	 * threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_irq(desc);
}
EXPORT_SYMBOL(synchronize_irq);
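
/*
 * Example (editor's sketch, hypothetical names): wait out all handlers
 * before freeing data they may still reference. The @shutting_down flag
 * is assumed to stop the handler from rearming the device:
 *
 *	priv->shutting_down = true;
 *	synchronize_irq(priv->irq);
 *	kfree(priv->buf);	(no hard or threaded handler runs anymore)
 */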

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action) {
		if (action->thread) {
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
			wake_up_process(action->thread);
		}
		if (action->secondary && action->secondary->thread) {
			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
			wake_up_process(action->secondary->thread);
		}
	}
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	const struct cpumask *prog_mask;
	int ret;

	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
	static struct cpumask tmp_mask;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock(&tmp_mask_lock);
	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
	} else {
		prog_mask = mask;
	}

	/*
	 * Make sure we only provide online CPUs to the irqchip,
	 * unless we are being asked to force the affinity (in which
	 * case we do as we are told).
	 */
	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
	if (!force && !cpumask_empty(&tmp_mask))
		ret = chip->irq_set_affinity(data, &tmp_mask, force);
	else if (force)
		ret = chip->irq_set_affinity(data, mask, force);
	else
		ret = -EINVAL;

	raw_spin_unlock(&tmp_mask_lock);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_data_update_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
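
/*
 * Example (editor's sketch, hypothetical values): convert an already
 * allocated but not yet started interrupt into a managed one:
 *
 *	struct irq_affinity_desc affd = { .is_managed = 1 };
 *
 *	cpumask_copy(&affd.mask, cpumask_of(target_cpu));
 *	ret = irq_update_affinity_desc(irq, &affd);
 *
 * This fails with -EBUSY if the interrupt was already started or is
 * already managed.
 */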

static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
			      bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
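
/*
 * Example (editor's sketch): steer an interrupt to a single CPU. This
 * fails with -EINVAL when the mask does not contain an online CPU:
 *
 *	ret = irq_set_affinity(irq, cpumask_of(target_cpu));
 */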

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);

int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
			      bool setaffinity)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	if (m && setaffinity)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
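
/*
 * Drivers normally use the inline wrappers from <linux/interrupt.h>
 * rather than calling this directly (editor's sketch, hypothetical
 * per-queue layout):
 *
 *	irq_set_affinity_and_hint(irq, cpumask_of(q->cpu));	(hint + set)
 *	...
 *	irq_set_affinity_hint(irq, NULL);	(clear before free_irq())
 */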

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || irq_is_nmi(desc))
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
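
/*
 * Example (editor's sketch, hypothetical driver): both callbacks must be
 * set up before registration; release pairs with the core's kref_put():
 *
 *	foo->notify.notify = foo_affinity_changed;
 *	foo->notify.release = foo_notify_release;
 *	ret = irq_set_affinity_notifier(irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	(before free_irq())
 */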

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */


/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 */
void disable_irq(unsigned int irq)
{
	might_sleep();
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
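
/*
 * Example (editor's sketch): the classic disable/enable pairing around a
 * reconfiguration. Calls nest, so every disable_irq() needs a matching
 * enable_irq():
 *
 *	disable_irq(irq);	(returns with no handler in progress)
 *	reprogram_hardware();	(hypothetical, now race free vs. the handler)
 *	enable_irq(irq);
 */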

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
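
/*
 * Example (editor's sketch, modeled on netpoll-style callers): invoke a
 * handler manually from atomic context only when nothing is in flight:
 *
 *	if (disable_hardirq(irq))
 *		foo_interrupt(irq, dev);	(hypothetical handler)
 *	enable_irq(irq);
 *
 * Note that the line is disabled in either case, so enable_irq() is
 * unconditional.
 */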

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN so irq_startup()
		 * needs to be invoked when it gets enabled the first time.
		 * This is also required when __enable_irq() is invoked for
		 * a managed and shutdown interrupt from the S3 resume
		 * path.
		 *
		 * If it was already started up, then irq_startup() will
		 * invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (irq_is_nmi(desc)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
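
/*
 * Example (editor's sketch): drivers usually go through the
 * enable_irq_wake()/disable_irq_wake() wrappers from their PM callbacks:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * with the matching disable_irq_wake() in the resume callback.
 */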

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
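
/*
 * Example (editor's sketch, hypothetical names): requesting a purely
 * threaded interrupt installs the default primary handler implicitly.
 * IRQF_ONESHOT is mandatory in that case, see the check in __setup_irq():
 *
 *	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */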

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = false;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	__set_current_state(TASK_RUNNING);

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
		valid = true;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

static int irq_wait_for_interrupt(struct irq_desc *desc,
				  struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		irq_thread_check_affinity(desc, action);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Internal function to notify that an interrupt thread is ready.
 */
static void irq_thread_set_ready(struct irq_desc *desc,
				 struct irqaction *action)
{
	set_bit(IRQTF_READY, &action->thread_flags);
	wake_up(&desc->wait_for_threads);
}

/*
 * Internal function to wake up an interrupt thread and wait until it is
 * ready.
 */
static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
						  struct irqaction *action)
{
	if (!action || !action->thread)
		return;

	wake_up_process(action->thread);
	wait_event(desc->wait_for_threads,
		   test_bit(IRQTF_READY, &action->thread_flags));
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	irq_thread_set_ready(desc, action);

	sched_set_fifo(current);

	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	while (!irq_wait_for_interrupt(desc, action)) {
		irqreturn_t action_ret;

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads())
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (irq_is_nmi(desc)) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)))
			goto mismatch;

		if ((old->flags & IRQF_ONESHOT) &&
		    (new->flags & IRQF_COND_ONESHOT))
			new->flags |= IRQF_ONESHOT;
		else if ((old->flags ^ new->flags) & IRQF_ONESHOT)
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1753 * keeps it in shutdown mode; it merely associates
1754 * resources if necessary and if that's not possible it
1755 * fails. Interrupts which are in managed shutdown mode
1756 * will simply ignore that activation request.
1757 */
1758 ret = irq_activate(desc);
1759 if (ret)
1760 goto out_unlock;
1761
1762 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
1763 IRQS_ONESHOT | IRQS_WAITING);
1764 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1765
1766 if (new->flags & IRQF_PERCPU) {
1767 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1768 irq_settings_set_per_cpu(desc);
1769 if (new->flags & IRQF_NO_DEBUG)
1770 irq_settings_set_no_debug(desc);
1771 }
1772
1773 if (noirqdebug)
1774 irq_settings_set_no_debug(desc);
1775
1776 if (new->flags & IRQF_ONESHOT)
1777 desc->istate |= IRQS_ONESHOT;
1778
1779 /* Exclude IRQ from balancing if requested */
1780 if (new->flags & IRQF_NOBALANCING) {
1781 irq_settings_set_no_balancing(desc);
1782 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1783 }
1784
1785 if (!(new->flags & IRQF_NO_AUTOEN) &&
1786 irq_settings_can_autoenable(desc)) {
1787 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1788 } else {
1789 /*
1790 * Shared interrupts do not go well with disabling
1791 * auto enable. A second, sharing request might come
1792 * in while the line is still disabled and then wait
1793 * for interrupts forever.
1794 */
1795 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1796 /* Undo nested disables: */
1797 desc->depth = 1;
1798 }
1799
1800 } else if (new->flags & IRQF_TRIGGER_MASK) {
1801 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1802 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1803
1804 if (nmsk != omsk)
1805 /* hope the handler works with current trigger mode */
1806 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1807 irq, omsk, nmsk);
1808 }
1809
1810 *old_ptr = new;
1811
1812 irq_pm_install_action(desc, new);
1813
1814 /* Reset broken irq detection when installing new handler */
1815 desc->irq_count = 0;
1816 desc->irqs_unhandled = 0;
1817
1818 /*
1819 * Check whether we disabled the irq via the spurious handler
1820 * before. Re-enable it and give it another chance.
1821 */
1822 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1823 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1824 __enable_irq(desc);
1825 }
1826
1827 raw_spin_unlock_irqrestore(&desc->lock, flags);
1828 chip_bus_sync_unlock(desc);
1829 mutex_unlock(&desc->request_mutex);
1830
1831 irq_setup_timings(desc, new);
1832
1833 wake_up_and_wait_for_irq_thread_ready(desc, new);
1834 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1835
1836 register_irq_proc(irq, desc);
1837 new->dir = NULL;
1838 register_handler_proc(irq, new);
1839 return 0;
1840
1841 mismatch:
1842 if (!(new->flags & IRQF_PROBE_SHARED)) {
1843 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1844 irq, new->flags, new->name, old->flags, old->name);
1845 #ifdef CONFIG_DEBUG_SHIRQ
1846 dump_stack();
1847 #endif
1848 }
1849 ret = -EBUSY;
1850
1851 out_unlock:
1852 raw_spin_unlock_irqrestore(&desc->lock, flags);
1853
1854 if (!desc->action)
1855 irq_release_resources(desc);
1856 out_bus_unlock:
1857 chip_bus_sync_unlock(desc);
1858 mutex_unlock(&desc->request_mutex);
1859
1860 out_thread:
1861 if (new->thread) {
1862 struct task_struct *t = new->thread;
1863
1864 new->thread = NULL;
1865 kthread_stop_put(t);
1866 }
1867 if (new->secondary && new->secondary->thread) {
1868 struct task_struct *t = new->secondary->thread;
1869
1870 new->secondary->thread = NULL;
1871 kthread_stop_put(t);
1872 }
1873 out_mput:
1874 module_put(desc->owner);
1875 return ret;
1876 }
1877
1878 /*
1879 * Internal function to unregister an irqaction - used to free
1880 * regular and special interrupts that are part of the architecture.
1881 */
1882 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1883 {
1884 unsigned irq = desc->irq_data.irq;
1885 struct irqaction *action, **action_ptr;
1886 unsigned long flags;
1887
1888 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1889
1890 mutex_lock(&desc->request_mutex);
1891 chip_bus_lock(desc);
1892 raw_spin_lock_irqsave(&desc->lock, flags);
1893
1894 /*
1895 * There can be multiple actions per IRQ descriptor, find the right
1896 * one based on the dev_id:
1897 */
1898 action_ptr = &desc->action;
1899 for (;;) {
1900 action = *action_ptr;
1901
1902 if (!action) {
1903 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1904 raw_spin_unlock_irqrestore(&desc->lock, flags);
1905 chip_bus_sync_unlock(desc);
1906 mutex_unlock(&desc->request_mutex);
1907 return NULL;
1908 }
1909
1910 if (action->dev_id == dev_id)
1911 break;
1912 action_ptr = &action->next;
1913 }
1914
1915 /* Found it - now remove it from the list of entries: */
1916 *action_ptr = action->next;
1917
1918 irq_pm_remove_action(desc, action);
1919
1920 /* If this was the last handler, shut down the IRQ line: */
1921 if (!desc->action) {
1922 irq_settings_clr_disable_unlazy(desc);
1923 /* Only shutdown. Deactivate after synchronize_hardirq() */
1924 irq_shutdown(desc);
1925 }
1926
1927 #ifdef CONFIG_SMP
1928 /* make sure affinity_hint is cleaned up */
1929 if (WARN_ON_ONCE(desc->affinity_hint))
1930 desc->affinity_hint = NULL;
1931 #endif
1932
1933 raw_spin_unlock_irqrestore(&desc->lock, flags);
1934 /*
1935 * Drop bus_lock here so the changes which were done in the chip
1936 * callbacks above are synced out to the irq chips which hang
1937 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1938 *
1939 * Apart from that, the bus_lock can also be taken from the threaded
1940 * handler in irq_finalize_oneshot() which results in a deadlock
1941 * because kthread_stop() would wait forever for the thread to
1942 * complete, which is blocked on the bus lock.
1943 *
1944 * The still-held desc->request_mutex protects against a
1945 * concurrent request_irq() of this irq so the release of resources
1946 * and timing data is properly serialized.
1947 */
1948 chip_bus_sync_unlock(desc);
1949
1950 unregister_handler_proc(irq, action);
1951
1952 /*
1953 * Make sure it's not being used on another CPU and if the chip
1954 * supports it also make sure that there is no (not yet serviced)
1955 * interrupt in flight at the hardware level.
1956 */
1957 __synchronize_irq(desc);
1958
1959 #ifdef CONFIG_DEBUG_SHIRQ
1960 /*
1961 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1962 * event to happen even now that it's being freed, so let's make sure that
1963 * is so by doing an extra call to the handler ....
1964 *
1965 * ( We do this after actually deregistering it, to make sure that a
1966 * 'real' IRQ doesn't run in parallel with our fake. )
1967 */
1968 if (action->flags & IRQF_SHARED) {
1969 local_irq_save(flags);
1970 action->handler(irq, dev_id);
1971 local_irq_restore(flags);
1972 }
1973 #endif
1974
1975 /*
1976 * The action has already been removed above, but the thread writes
1977 * its oneshot mask bit when it completes. However, request_mutex is
1978 * held across this, which prevents __setup_irq() from handing out
1979 * the same bit to a newly requested action.
1980 */
1981 if (action->thread) {
1982 kthread_stop_put(action->thread);
1983 if (action->secondary && action->secondary->thread)
1984 kthread_stop_put(action->secondary->thread);
1985 }
1986
1987 /* Last action releases resources */
1988 if (!desc->action) {
1989 /*
1990 * Reacquire bus lock as irq_release_resources() might
1991 * require it to deallocate resources over the slow bus.
1992 */
1993 chip_bus_lock(desc);
1994 /*
1995 * There is no interrupt on the fly anymore. Deactivate it
1996 * completely.
1997 */
1998 raw_spin_lock_irqsave(&desc->lock, flags);
1999 irq_domain_deactivate_irq(&desc->irq_data);
2000 raw_spin_unlock_irqrestore(&desc->lock, flags);
2001
2002 irq_release_resources(desc);
2003 chip_bus_sync_unlock(desc);
2004 irq_remove_timings(desc);
2005 }
2006
2007 mutex_unlock(&desc->request_mutex);
2008
2009 irq_chip_pm_put(&desc->irq_data);
2010 module_put(desc->owner);
2011 kfree(action->secondary);
2012 return action;
2013 }
2014
2015 /**
2016 * free_irq - free an interrupt allocated with request_irq
2017 * @irq: Interrupt line to free
2018 * @dev_id: Device identity to free
2019 *
2020 * Remove an interrupt handler. The handler is removed and if the
2021 * interrupt line is no longer in use by any driver it is disabled.
2022 * On a shared IRQ the caller must ensure the interrupt is disabled
2023 * on the card it drives before calling this function. The function
2024 * does not return until any executing interrupts for this IRQ
2025 * have completed.
2026 *
2027 * This function must not be called from interrupt context.
2028 *
2029 * Returns the devname argument passed to request_irq.
2030 */
2031 const void *free_irq(unsigned int irq, void *dev_id)
2032 {
2033 struct irq_desc *desc = irq_to_desc(irq);
2034 struct irqaction *action;
2035 const char *devname;
2036
2037 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2038 return NULL;
2039
2040 #ifdef CONFIG_SMP
2041 if (WARN_ON(desc->affinity_notify))
2042 desc->affinity_notify = NULL;
2043 #endif
2044
2045 action = __free_irq(desc, dev_id);
2046
2047 if (!action)
2048 return NULL;
2049
2050 devname = action->name;
2051 kfree(action);
2052 return devname;
2053 }
2054 EXPORT_SYMBOL(free_irq);
2055
2056 /* This function must be called with desc->lock held */
2057 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2058 {
2059 const char *devname = NULL;
2060
2061 desc->istate &= ~IRQS_NMI;
2062
2063 if (!WARN_ON(desc->action == NULL)) {
2064 irq_pm_remove_action(desc, desc->action);
2065 devname = desc->action->name;
2066 unregister_handler_proc(irq, desc->action);
2067
2068 kfree(desc->action);
2069 desc->action = NULL;
2070 }
2071
2072 irq_settings_clr_disable_unlazy(desc);
2073 irq_shutdown_and_deactivate(desc);
2074
2075 irq_release_resources(desc);
2076
2077 irq_chip_pm_put(&desc->irq_data);
2078 module_put(desc->owner);
2079
2080 return devname;
2081 }
2082
2083 const void *free_nmi(unsigned int irq, void *dev_id)
2084 {
2085 struct irq_desc *desc = irq_to_desc(irq);
2086 unsigned long flags;
2087 const void *devname;
2088
2089 if (!desc || WARN_ON(!irq_is_nmi(desc)))
2090 return NULL;
2091
2092 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2093 return NULL;
2094
2095 /* NMI still enabled */
2096 if (WARN_ON(desc->depth == 0))
2097 disable_nmi_nosync(irq);
2098
2099 raw_spin_lock_irqsave(&desc->lock, flags);
2100
2101 irq_nmi_teardown(desc);
2102 devname = __cleanup_nmi(irq, desc);
2103
2104 raw_spin_unlock_irqrestore(&desc->lock, flags);
2105
2106 return devname;
2107 }
2108
2109 /**
2110 * request_threaded_irq - allocate an interrupt line
2111 * @irq: Interrupt line to allocate
2112 * @handler: Function to be called when the IRQ occurs.
2113 * Primary handler for threaded interrupts.
2114 * If handler is NULL and thread_fn != NULL
2115 * the default primary handler is installed.
2116 * @thread_fn: Function called from the irq handler thread
2117 * If NULL, no irq thread is created
2118 * @irqflags: Interrupt type flags
2119 * @devname: An ascii name for the claiming device
2120 * @dev_id: A cookie passed back to the handler function
2121 *
2122 * This call allocates interrupt resources and enables the
2123 * interrupt line and IRQ handling. From the point this
2124 * call is made your handler function may be invoked. Since
2125 * your handler function must clear any interrupt the board
2126 * raises, you must take care both to initialise your hardware
2127 * and to set up the interrupt handler in the right order.
2128 *
2129 * If you want to set up a threaded irq handler for your device
2130 * then you need to supply @handler and @thread_fn. @handler is
2131 * still called in hard interrupt context and has to check
2132 * whether the interrupt originates from the device. If yes it
2133 * needs to disable the interrupt on the device and return
2134 * IRQ_WAKE_THREAD which will wake up the handler thread and run
2135 * @thread_fn. This split handler design is necessary to support
2136 * shared interrupts.
2137 *
2138 * Dev_id must be globally unique. Normally the address of the
2139 * device data structure is used as the cookie. Since the handler
2140 * receives this value it makes sense to use it.
2141 *
2142 * If your interrupt is shared you must pass a non-NULL dev_id
2143 * as this is required when freeing the interrupt.
2144 *
2145 * Flags:
2146 *
2147 * IRQF_SHARED Interrupt is shared
2148 * IRQF_TRIGGER_* Specify active edge(s) or level
2149 * IRQF_ONESHOT Run thread_fn with interrupt line masked
2150 */
2151 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2152 irq_handler_t thread_fn, unsigned long irqflags,
2153 const char *devname, void *dev_id)
2154 {
2155 struct irqaction *action;
2156 struct irq_desc *desc;
2157 int retval;
2158
2159 if (irq == IRQ_NOTCONNECTED)
2160 return -ENOTCONN;
2161
2162 /*
2163 * Sanity-check: shared interrupts must pass in a real dev-ID,
2164 * otherwise we'll have trouble later trying to figure out
2165 * which interrupt is which (messes up the interrupt freeing
2166 * logic etc).
2167 *
2168 * Also shared interrupts do not go well with disabling auto enable.
2169 * The sharing interrupt might request it while it's still disabled
2170 * and then wait for interrupts forever.
2171 *
2172 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2173 * it cannot be set along with IRQF_NO_SUSPEND.
2174 */
2175 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2176 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2177 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2178 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2179 return -EINVAL;
2180
2181 desc = irq_to_desc(irq);
2182 if (!desc)
2183 return -EINVAL;
2184
2185 if (!irq_settings_can_request(desc) ||
2186 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2187 return -EINVAL;
2188
2189 if (!handler) {
2190 if (!thread_fn)
2191 return -EINVAL;
2192 handler = irq_default_primary_handler;
2193 }
2194
2195 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2196 if (!action)
2197 return -ENOMEM;
2198
2199 action->handler = handler;
2200 action->thread_fn = thread_fn;
2201 action->flags = irqflags;
2202 action->name = devname;
2203 action->dev_id = dev_id;
2204
2205 retval = irq_chip_pm_get(&desc->irq_data);
2206 if (retval < 0) {
2207 kfree(action);
2208 return retval;
2209 }
2210
2211 retval = __setup_irq(irq, desc, action);
2212
2213 if (retval) {
2214 irq_chip_pm_put(&desc->irq_data);
2215 kfree(action->secondary);
2216 kfree(action);
2217 }
2218
2219 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2220 if (!retval && (irqflags & IRQF_SHARED)) {
2221 /*
2222 * It's a shared IRQ -- the driver ought to be prepared for it
2223 * to happen immediately, so let's make sure....
2224 * We disable the irq to make sure that a 'real' IRQ doesn't
2225 * run in parallel with our fake.
2226 */
2227 unsigned long flags;
2228
2229 disable_irq(irq);
2230 local_irq_save(flags);
2231
2232 handler(irq, dev_id);
2233
2234 local_irq_restore(flags);
2235 enable_irq(irq);
2236 }
2237 #endif
2238 return retval;
2239 }
2240 EXPORT_SYMBOL(request_threaded_irq);
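/*
 * Hedged usage sketch (illustration only, not part of this file): the
 * split primary/threaded handler pattern described above. The "my_dev"
 * structure and my_dev_*() helpers are hypothetical; only the
 * request_threaded_irq()/free_irq() calls and IRQ_* return values are
 * real kernel API.
 */
#if 0
static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* On a shared line, check whether our device raised the irq. */
	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;

	/* Quiesce the device, then defer the real work to the thread. */
	my_dev_mask_irq(dev);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_handle_events(dev);	/* may sleep in thread context */
	my_dev_unmask_irq(dev);
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *dev)
{
	/* dev doubles as the unique cookie passed back to both handlers */
	return request_threaded_irq(dev->irq, my_hardirq, my_thread_fn,
				    IRQF_SHARED, "my_dev", dev);
}

static void my_remove(struct my_dev *dev)
{
	free_irq(dev->irq, dev);	/* waits for running handlers */
}
#endif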
2241
2242 /**
2243 * request_any_context_irq - allocate an interrupt line
2244 * @irq: Interrupt line to allocate
2245 * @handler: Function to be called when the IRQ occurs.
2246 * Threaded handler for threaded interrupts.
2247 * @flags: Interrupt type flags
2248 * @name: An ascii name for the claiming device
2249 * @dev_id: A cookie passed back to the handler function
2250 *
2251 * This call allocates interrupt resources and enables the
2252 * interrupt line and IRQ handling. It selects either a
2253 * hardirq or threaded handling method depending on the
2254 * context.
2255 *
2256 * On failure, it returns a negative value. On success,
2257 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2258 */
2259 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2260 unsigned long flags, const char *name, void *dev_id)
2261 {
2262 struct irq_desc *desc;
2263 int ret;
2264
2265 if (irq == IRQ_NOTCONNECTED)
2266 return -ENOTCONN;
2267
2268 desc = irq_to_desc(irq);
2269 if (!desc)
2270 return -EINVAL;
2271
2272 if (irq_settings_is_nested_thread(desc)) {
2273 ret = request_threaded_irq(irq, NULL, handler,
2274 flags, name, dev_id);
2275 return !ret ? IRQC_IS_NESTED : ret;
2276 }
2277
2278 ret = request_irq(irq, handler, flags, name, dev_id);
2279 return !ret ? IRQC_IS_HARDIRQ : ret;
2280 }
2281 EXPORT_SYMBOL_GPL(request_any_context_irq);
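/*
 * Hedged usage sketch: on success request_any_context_irq() returns
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than 0, so callers must
 * check for a negative value. "my_handler" and "dev" are hypothetical.
 */
#if 0
	int ret = request_any_context_irq(dev->irq, my_handler, 0,
					  "my_dev", dev);
	if (ret < 0)
		return ret;
	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here */
#endif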
2282
2283 /**
2284 * request_nmi - allocate an interrupt line for NMI delivery
2285 * @irq: Interrupt line to allocate
2286 * @handler: Function to be called when the IRQ occurs.
2287 * Threaded handler for threaded interrupts.
2288 * @irqflags: Interrupt type flags
2289 * @name: An ascii name for the claiming device
2290 * @dev_id: A cookie passed back to the handler function
2291 *
2292 * This call allocates interrupt resources and enables the
2293 * interrupt line and IRQ handling. It sets up the IRQ line
2294 * to be handled as an NMI.
2295 *
2296 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2297 * cannot be threaded.
2298 *
2299 * Interrupt lines requested for NMI delivery must produce per-CPU
2300 * interrupts and have the auto-enable setting disabled.
2301 *
2302 * Dev_id must be globally unique. Normally the address of the
2303 * device data structure is used as the cookie. Since the handler
2304 * receives this value it makes sense to use it.
2305 *
2306 * If the interrupt line cannot be used to deliver NMIs, the
2307 * function fails and returns a negative value.
2308 */
2309 int request_nmi(unsigned int irq, irq_handler_t handler,
2310 unsigned long irqflags, const char *name, void *dev_id)
2311 {
2312 struct irqaction *action;
2313 struct irq_desc *desc;
2314 unsigned long flags;
2315 int retval;
2316
2317 if (irq == IRQ_NOTCONNECTED)
2318 return -ENOTCONN;
2319
2320 /* NMIs cannot be shared and cannot be used for polling */
2321 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2322 return -EINVAL;
2323
2324 if (!(irqflags & IRQF_PERCPU))
2325 return -EINVAL;
2326
2327 if (!handler)
2328 return -EINVAL;
2329
2330 desc = irq_to_desc(irq);
2331
2332 if (!desc || (irq_settings_can_autoenable(desc) &&
2333 !(irqflags & IRQF_NO_AUTOEN)) ||
2334 !irq_settings_can_request(desc) ||
2335 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2336 !irq_supports_nmi(desc))
2337 return -EINVAL;
2338
2339 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2340 if (!action)
2341 return -ENOMEM;
2342
2343 action->handler = handler;
2344 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2345 action->name = name;
2346 action->dev_id = dev_id;
2347
2348 retval = irq_chip_pm_get(&desc->irq_data);
2349 if (retval < 0)
2350 goto err_out;
2351
2352 retval = __setup_irq(irq, desc, action);
2353 if (retval)
2354 goto err_irq_setup;
2355
2356 raw_spin_lock_irqsave(&desc->lock, flags);
2357
2358 /* Setup NMI state */
2359 desc->istate |= IRQS_NMI;
2360 retval = irq_nmi_setup(desc);
2361 if (retval) {
2362 __cleanup_nmi(irq, desc);
2363 raw_spin_unlock_irqrestore(&desc->lock, flags);
2364 return -EINVAL;
2365 }
2366
2367 raw_spin_unlock_irqrestore(&desc->lock, flags);
2368
2369 return 0;
2370
2371 err_irq_setup:
2372 irq_chip_pm_put(&desc->irq_data);
2373 err_out:
2374 kfree(action);
2375
2376 return retval;
2377 }
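/*
 * Hedged usage sketch: a flag combination that passes the checks above
 * (no sharing, per-CPU, no auto-enable). "my_nmi_handler" and "dev" are
 * hypothetical; enable_nmi() is the counterpart for enabling the line
 * afterwards.
 */
#if 0
	ret = request_nmi(irq, my_nmi_handler,
			  IRQF_PERCPU | IRQF_NO_AUTOEN, "my_nmi", dev);
	if (!ret)
		enable_nmi(irq);
#endif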
2378
2379 void enable_percpu_irq(unsigned int irq, unsigned int type)
2380 {
2381 unsigned int cpu = smp_processor_id();
2382 unsigned long flags;
2383 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2384
2385 if (!desc)
2386 return;
2387
2388 /*
2389 * If the trigger type is not specified by the caller, then
2390 * use the default for this interrupt.
2391 */
2392 type &= IRQ_TYPE_SENSE_MASK;
2393 if (type == IRQ_TYPE_NONE)
2394 type = irqd_get_trigger_type(&desc->irq_data);
2395
2396 if (type != IRQ_TYPE_NONE) {
2397 int ret;
2398
2399 ret = __irq_set_trigger(desc, type);
2400
2401 if (ret) {
2402 WARN(1, "failed to set type for IRQ%d\n", irq);
2403 goto out;
2404 }
2405 }
2406
2407 irq_percpu_enable(desc, cpu);
2408 out:
2409 irq_put_desc_unlock(desc, flags);
2410 }
2411 EXPORT_SYMBOL_GPL(enable_percpu_irq);
2412
2413 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2414 {
2415 enable_percpu_irq(irq, type);
2416 }
2417
2418 /**
2419 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2420 * @irq: Linux irq number to check for
2421 *
2422 * Must be called from a non-migratable context. Returns the enable
2423 * state of a per-CPU interrupt on the current CPU.
2424 */
2425 bool irq_percpu_is_enabled(unsigned int irq)
2426 {
2427 unsigned int cpu = smp_processor_id();
2428 struct irq_desc *desc;
2429 unsigned long flags;
2430 bool is_enabled;
2431
2432 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2433 if (!desc)
2434 return false;
2435
2436 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2437 irq_put_desc_unlock(desc, flags);
2438
2439 return is_enabled;
2440 }
2441 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2442
2443 void disable_percpu_irq(unsigned int irq)
2444 {
2445 unsigned int cpu = smp_processor_id();
2446 unsigned long flags;
2447 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2448
2449 if (!desc)
2450 return;
2451
2452 irq_percpu_disable(desc, cpu);
2453 irq_put_desc_unlock(desc, flags);
2454 }
2455 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2456
2457 void disable_percpu_nmi(unsigned int irq)
2458 {
2459 disable_percpu_irq(irq);
2460 }
2461
2462 /*
2463 * Internal function to unregister a percpu irqaction.
2464 */
2465 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2466 {
2467 struct irq_desc *desc = irq_to_desc(irq);
2468 struct irqaction *action;
2469 unsigned long flags;
2470
2471 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2472
2473 if (!desc)
2474 return NULL;
2475
2476 raw_spin_lock_irqsave(&desc->lock, flags);
2477
2478 action = desc->action;
2479 if (!action || action->percpu_dev_id != dev_id) {
2480 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2481 goto bad;
2482 }
2483
2484 if (!cpumask_empty(desc->percpu_enabled)) {
2485 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2486 irq, cpumask_first(desc->percpu_enabled));
2487 goto bad;
2488 }
2489
2490 /* Found it - now remove it from the list of entries: */
2491 desc->action = NULL;
2492
2493 desc->istate &= ~IRQS_NMI;
2494
2495 raw_spin_unlock_irqrestore(&desc->lock, flags);
2496
2497 unregister_handler_proc(irq, action);
2498
2499 irq_chip_pm_put(&desc->irq_data);
2500 module_put(desc->owner);
2501 return action;
2502
2503 bad:
2504 raw_spin_unlock_irqrestore(&desc->lock, flags);
2505 return NULL;
2506 }
2507
2508 /**
2509 * remove_percpu_irq - free a per-cpu interrupt
2510 * @irq: Interrupt line to free
2511 * @act: irqaction for the interrupt
2512 *
2513 * Used to remove interrupts statically set up by the early boot process.
2514 */
2515 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2516 {
2517 struct irq_desc *desc = irq_to_desc(irq);
2518
2519 if (desc && irq_settings_is_per_cpu_devid(desc))
2520 __free_percpu_irq(irq, act->percpu_dev_id);
2521 }
2522
2523 /**
2524 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2525 * @irq: Interrupt line to free
2526 * @dev_id: Device identity to free
2527 *
2528 * Remove a percpu interrupt handler. The handler is removed, but
2529 * the interrupt line is not disabled. This must be done on each
2530 * CPU before calling this function. The function does not return
2531 * until any executing interrupts for this IRQ have completed.
2532 *
2533 * This function must not be called from interrupt context.
2534 */
2535 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2536 {
2537 struct irq_desc *desc = irq_to_desc(irq);
2538
2539 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2540 return;
2541
2542 chip_bus_lock(desc);
2543 kfree(__free_percpu_irq(irq, dev_id));
2544 chip_bus_sync_unlock(desc);
2545 }
2546 EXPORT_SYMBOL_GPL(free_percpu_irq);
2547
2548 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2549 {
2550 struct irq_desc *desc = irq_to_desc(irq);
2551
2552 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2553 return;
2554
2555 if (WARN_ON(!irq_is_nmi(desc)))
2556 return;
2557
2558 kfree(__free_percpu_irq(irq, dev_id));
2559 }
2560
2561 /**
2562 * setup_percpu_irq - setup a per-cpu interrupt
2563 * @irq: Interrupt line to setup
2564 * @act: irqaction for the interrupt
2565 *
2566 * Used to statically set up per-cpu interrupts in the early boot process.
2567 */
2568 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2569 {
2570 struct irq_desc *desc = irq_to_desc(irq);
2571 int retval;
2572
2573 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2574 return -EINVAL;
2575
2576 retval = irq_chip_pm_get(&desc->irq_data);
2577 if (retval < 0)
2578 return retval;
2579
2580 retval = __setup_irq(irq, desc, act);
2581
2582 if (retval)
2583 irq_chip_pm_put(&desc->irq_data);
2584
2585 return retval;
2586 }
2587
2588 /**
2589 * __request_percpu_irq - allocate a percpu interrupt line
2590 * @irq: Interrupt line to allocate
2591 * @handler: Function to be called when the IRQ occurs.
2592 * @flags: Interrupt type flags (IRQF_TIMER only)
2593 * @devname: An ascii name for the claiming device
2594 * @dev_id: A percpu cookie passed back to the handler function
2595 *
2596 * This call allocates interrupt resources and enables the
2597 * interrupt on the local CPU. If the interrupt is supposed to be
2598 * enabled on other CPUs, it has to be done on each CPU using
2599 * enable_percpu_irq().
2600 *
2601 * Dev_id must be globally unique. It is a per-cpu variable, and
2602 * the handler gets called with the interrupted CPU's instance of
2603 * that variable.
2604 */
2605 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2606 unsigned long flags, const char *devname,
2607 void __percpu *dev_id)
2608 {
2609 struct irqaction *action;
2610 struct irq_desc *desc;
2611 int retval;
2612
2613 if (!dev_id)
2614 return -EINVAL;
2615
2616 desc = irq_to_desc(irq);
2617 if (!desc || !irq_settings_can_request(desc) ||
2618 !irq_settings_is_per_cpu_devid(desc))
2619 return -EINVAL;
2620
2621 if (flags && flags != IRQF_TIMER)
2622 return -EINVAL;
2623
2624 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2625 if (!action)
2626 return -ENOMEM;
2627
2628 action->handler = handler;
2629 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2630 action->name = devname;
2631 action->percpu_dev_id = dev_id;
2632
2633 retval = irq_chip_pm_get(&desc->irq_data);
2634 if (retval < 0) {
2635 kfree(action);
2636 return retval;
2637 }
2638
2639 retval = __setup_irq(irq, desc, action);
2640
2641 if (retval) {
2642 irq_chip_pm_put(&desc->irq_data);
2643 kfree(action);
2644 }
2645
2646 return retval;
2647 }
2648 EXPORT_SYMBOL_GPL(__request_percpu_irq);
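/*
 * Hedged usage sketch: a per-CPU interrupt, as used by timer-like
 * drivers. The per-cpu state, my_handle() and the hotplug placement
 * are hypothetical; request_percpu_irq() (the flags==0 wrapper around
 * this function) and enable_percpu_irq() are real API.
 */
#if 0
static DEFINE_PER_CPU(struct my_pcpu_state, my_pcpu_state);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	/* dev_id is the interrupted CPU's instance of the cookie */
	struct my_pcpu_state *st = dev_id;

	return my_handle(st) ? IRQ_HANDLED : IRQ_NONE;
}

static int my_init(unsigned int irq)
{
	int ret = request_percpu_irq(irq, my_percpu_handler, "my_pcpu",
				     &my_pcpu_state);
	if (ret)
		return ret;

	/* Then, on each CPU (e.g. from a CPU hotplug online callback): */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif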
2649
2650 /**
2651 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2652 * @irq: Interrupt line to allocate
2653 * @handler: Function to be called when the IRQ occurs.
2654 * @name: An ascii name for the claiming device
2655 * @dev_id: A percpu cookie passed back to the handler function
2656 *
2657 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2658 * have to be set up on each CPU by calling prepare_percpu_nmi() before
2659 * being enabled on the same CPU by using enable_percpu_nmi().
2660 *
2661 * Dev_id must be globally unique. It is a per-cpu variable, and
2662 * the handler gets called with the interrupted CPU's instance of
2663 * that variable.
2664 *
2665 * Interrupt lines requested for NMI delivery should have the
2666 * auto-enable setting disabled.
2667 *
2668 * If the interrupt line cannot be used to deliver NMIs, the
2669 * function fails, returning a negative value.
2670 */
2671 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2672 const char *name, void __percpu *dev_id)
2673 {
2674 struct irqaction *action;
2675 struct irq_desc *desc;
2676 unsigned long flags;
2677 int retval;
2678
2679 if (!handler)
2680 return -EINVAL;
2681
2682 desc = irq_to_desc(irq);
2683
2684 if (!desc || !irq_settings_can_request(desc) ||
2685 !irq_settings_is_per_cpu_devid(desc) ||
2686 irq_settings_can_autoenable(desc) ||
2687 !irq_supports_nmi(desc))
2688 return -EINVAL;
2689
2690 /* The line cannot already be NMI */
2691 if (irq_is_nmi(desc))
2692 return -EINVAL;
2693
2694 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2695 if (!action)
2696 return -ENOMEM;
2697
2698 action->handler = handler;
2699 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2700 | IRQF_NOBALANCING;
2701 action->name = name;
2702 action->percpu_dev_id = dev_id;
2703
2704 retval = irq_chip_pm_get(&desc->irq_data);
2705 if (retval < 0)
2706 goto err_out;
2707
2708 retval = __setup_irq(irq, desc, action);
2709 if (retval)
2710 goto err_irq_setup;
2711
2712 raw_spin_lock_irqsave(&desc->lock, flags);
2713 desc->istate |= IRQS_NMI;
2714 raw_spin_unlock_irqrestore(&desc->lock, flags);
2715
2716 return 0;
2717
2718 err_irq_setup:
2719 irq_chip_pm_put(&desc->irq_data);
2720 err_out:
2721 kfree(action);
2722
2723 return retval;
2724 }
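/*
 * Hedged usage sketch of the per-CPU NMI lifecycle: requested once,
 * then prepared and enabled on every CPU; teardown runs in reverse
 * order. "my_nmi_handler" and "my_pcpu_state" are hypothetical.
 */
#if 0
	ret = request_percpu_nmi(irq, my_nmi_handler, "my_nmi",
				 &my_pcpu_state);

	/* On each CPU, from non-preemptible context: */
	ret = prepare_percpu_nmi(irq);
	if (!ret)
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);

	/* Teardown, again on each CPU: */
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
#endif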
2725
2726 /**
2727 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2728 * @irq: Interrupt line to prepare for NMI delivery
2729 *
2730 * This call prepares an interrupt line to deliver NMI on the current CPU,
2731 * before that interrupt line gets enabled with enable_percpu_nmi().
2732 *
2733 * As a CPU local operation, this should be called from non-preemptible
2734 * context.
2735 *
2736 * If the interrupt line cannot be used to deliver NMIs, the
2737 * function fails, returning a negative value.
2738 */
2739 int prepare_percpu_nmi(unsigned int irq)
2740 {
2741 unsigned long flags;
2742 struct irq_desc *desc;
2743 int ret = 0;
2744
2745 WARN_ON(preemptible());
2746
2747 desc = irq_get_desc_lock(irq, &flags,
2748 IRQ_GET_DESC_CHECK_PERCPU);
2749 if (!desc)
2750 return -EINVAL;
2751
2752 if (WARN(!irq_is_nmi(desc),
2753 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2754 irq)) {
2755 ret = -EINVAL;
2756 goto out;
2757 }
2758
2759 ret = irq_nmi_setup(desc);
2760 if (ret) {
2761 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2762 goto out;
2763 }
2764
2765 out:
2766 irq_put_desc_unlock(desc, flags);
2767 return ret;
2768 }
2769
2770 /**
2771 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2772 * @irq: Interrupt line from which CPU local NMI configuration should be
2773 * removed
2774 *
2775 * This call undoes the setup done by prepare_percpu_nmi().
2776 *
2777 * IRQ line should not be enabled for the current CPU.
2778 *
2779 * As a CPU local operation, this should be called from non-preemptible
2780 * context.
2781 */
2782 void teardown_percpu_nmi(unsigned int irq)
2783 {
2784 unsigned long flags;
2785 struct irq_desc *desc;
2786
2787 WARN_ON(preemptible());
2788
2789 desc = irq_get_desc_lock(irq, &flags,
2790 IRQ_GET_DESC_CHECK_PERCPU);
2791 if (!desc)
2792 return;
2793
2794 if (WARN_ON(!irq_is_nmi(desc)))
2795 goto out;
2796
2797 irq_nmi_teardown(desc);
2798 out:
2799 irq_put_desc_unlock(desc, flags);
2800 }
2801
2802 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2803 bool *state)
2804 {
2805 struct irq_chip *chip;
2806 int err = -EINVAL;
2807
2808 do {
2809 chip = irq_data_get_irq_chip(data);
2810 if (WARN_ON_ONCE(!chip))
2811 return -ENODEV;
2812 if (chip->irq_get_irqchip_state)
2813 break;
2814 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2815 data = data->parent_data;
2816 #else
2817 data = NULL;
2818 #endif
2819 } while (data);
2820
2821 if (data)
2822 err = chip->irq_get_irqchip_state(data, which, state);
2823 return err;
2824 }
2825
2826 /**
2827 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2828 * @irq: Interrupt line that is forwarded to a VM
2829 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2830 * @state: a pointer to a boolean where the state is to be stored
2831 *
2832 * This call snapshots the internal irqchip state of an
2833 * interrupt, returning into @state the bit corresponding to
2834 * state @which.
2835 *
2836 * This function should be called with preemption disabled if the
2837 * interrupt controller has per-cpu registers.
2838 */
2839 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2840 bool *state)
2841 {
2842 struct irq_desc *desc;
2843 struct irq_data *data;
2844 unsigned long flags;
2845 int err = -EINVAL;
2846
2847 desc = irq_get_desc_buslock(irq, &flags, 0);
2848 if (!desc)
2849 return err;
2850
2851 data = irq_desc_get_irq_data(desc);
2852
2853 err = __irq_get_irqchip_state(data, which, state);
2854
2855 irq_put_desc_busunlock(desc, flags);
2856 return err;
2857 }
2858 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
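/*
 * Hedged usage sketch: snapshotting whether an interrupt is pending,
 * e.g. before forwarding its state to a VM. The "irq" variable is
 * assumed to exist in the caller.
 */
#if 0
	bool pending;

	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		pr_info("irq %u pending: %d\n", irq, pending);
#endif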
2859
2860 /**
2861 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2862 * @irq: Interrupt line that is forwarded to a VM
2863 * @which: State to be restored (one of IRQCHIP_STATE_*)
2864 * @val: Value corresponding to @which
2865 *
2866 * This call sets the internal irqchip state of an interrupt,
2867 * depending on the value of @which.
2868 *
2869 * This function should be called with migration disabled if the
2870 * interrupt controller has per-cpu registers.
2871 */
2872 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2873 bool val)
2874 {
2875 struct irq_desc *desc;
2876 struct irq_data *data;
2877 struct irq_chip *chip;
2878 unsigned long flags;
2879 int err = -EINVAL;
2880
2881 desc = irq_get_desc_buslock(irq, &flags, 0);
2882 if (!desc)
2883 return err;
2884
2885 data = irq_desc_get_irq_data(desc);
2886
2887 do {
2888 chip = irq_data_get_irq_chip(data);
2889 if (WARN_ON_ONCE(!chip)) {
2890 err = -ENODEV;
2891 goto out_unlock;
2892 }
2893 if (chip->irq_set_irqchip_state)
2894 break;
2895 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2896 data = data->parent_data;
2897 #else
2898 data = NULL;
2899 #endif
2900 } while (data);
2901
2902 if (data)
2903 err = chip->irq_set_irqchip_state(data, which, val);
2904
2905 out_unlock:
2906 irq_put_desc_busunlock(desc, flags);
2907 return err;
2908 }
2909 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
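/*
 * Hedged usage sketch: restoring a previously snapshotted pending bit,
 * the counterpart of the query above.
 */
#if 0
	int err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
#endif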
2910
2911 /**
2912 * irq_has_action - Check whether an interrupt is requested
2913 * @irq: The linux irq number
2914 *
2915 * Returns: A snapshot of the current state
2916 */
2917 bool irq_has_action(unsigned int irq)
2918 {
2919 bool res;
2920
2921 rcu_read_lock();
2922 res = irq_desc_has_action(irq_to_desc(irq));
2923 rcu_read_unlock();
2924 return res;
2925 }
2926 EXPORT_SYMBOL_GPL(irq_has_action);
2927
2928 /**
2929 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2930 * @irq: The linux irq number
2931 * @bitmask: The bitmask to evaluate
2932 *
2933 * Returns: True if one of the bits in @bitmask is set
2934 */
2935 bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2936 {
2937 struct irq_desc *desc;
2938 bool res = false;
2939
2940 rcu_read_lock();
2941 desc = irq_to_desc(irq);
2942 if (desc)
2943 res = !!(desc->status_use_accessors & bitmask);
2944 rcu_read_unlock();
2945 return res;
2946 }
2947 EXPORT_SYMBOL_GPL(irq_check_status_bit);
2948