/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
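/*
 * Example (illustrative sketch only, not part of this header): combining
 * trigger and sharing flags when requesting a line. "my_dev" and
 * "my_handler" are hypothetical names; request_irq() itself is declared
 * further below.
 */
#if 0	/* usage sketch, not compiled */
static int my_dev_setup_irq(struct my_dev *mydev, unsigned int irq)
{
	/* Rising-edge triggered line, shared with other devices. */
	return request_irq(irq, my_handler,
			   IRQF_TRIGGER_RISING | IRQF_SHARED,
			   "my-device", mydev);
}
#endif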
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);
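/*
 * Example (illustrative sketch only): a minimal irq_handler_t for a
 * shared line. "my_dev", my_dev_irq_pending() and my_dev_ack_irq() are
 * hypothetical. On a shared IRQ the handler must check whether its own
 * device actually raised the interrupt and return IRQ_NONE if not.
 */
#if 0	/* usage sketch, not compiled */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev *mydev = dev_id;

	if (!my_dev_irq_pending(mydev))
		return IRQ_NONE;	/* not our device */

	my_dev_ack_irq(mydev);
	return IRQ_HANDLED;
}
#endif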
/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
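/*
 * Example (illustrative sketch only): a threaded request where the hard
 * handler only acknowledges the device and defers the real work to a
 * thread. All "my_*" names are hypothetical. Note that if the primary
 * handler were NULL the core would install a default one, and
 * IRQF_ONESHOT would then be mandatory to keep a level-triggered line
 * masked until the thread has run.
 */
#if 0	/* usage sketch, not compiled */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/* Runs in hardirq context: just ack and defer. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* Runs in process context: sleeping is allowed here. */
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, unsigned int irq, void *mydev)
{
	return devm_request_threaded_irq(dev, irq, my_quick_check,
					 my_thread_fn, IRQF_ONESHOT,
					 "my-device", mydev);
}
#endif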
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed:	1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};
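/*
 * Example (illustrative sketch only): describing an MSI-X layout where
 * the first vector (config) and the last one (errors) are excluded from
 * automatic affinity spreading. pci_alloc_irq_vectors_affinity() lives
 * outside this header (linux/pci.h), and all "my_*" names are
 * hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int my_alloc_vectors(struct pci_dev *pdev, unsigned int nr_queues)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* config interrupt, not spread */
		.post_vectors	= 1,	/* error interrupt, not spread */
	};

	/* The two reserved vectors plus at least one queue vector. */
	return pci_alloc_irq_vectors_affinity(pdev, 3, nr_queues + 2,
					      PCI_IRQ_MSIX, &affd);
}
#endif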
#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */
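/*
 * Example (illustrative sketch only): pinning an interrupt to one CPU,
 * e.g. to keep a latency-sensitive handler local to the core that
 * consumes its data. "my_pin_irq" and the -ENOSYS convention are
 * assumptions; irq_can_set_affinity() guards against lines whose chip
 * does not support affinity changes.
 */
#if 0	/* usage sketch, not compiled */
static int my_pin_irq(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -ENOSYS;

	/* Fails if @cpu is not online. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}
#endif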
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and which are the only irq-context users of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads	(true)
# else
extern bool force_irqthreads;
# endif
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif
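/*
 * Example (illustrative sketch only): arming an interrupt as a system
 * wakeup source across suspend/resume. The "my_*" callbacks and device
 * structure are hypothetical; the enable/disable calls must be
 * balanced.
 */
#if 0	/* usage sketch, not compiled */
static int my_suspend(struct device *dev)
{
	struct my_dev *mydev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(mydev->irq);
	return 0;
}

static int my_resume(struct device *dev)
{
	struct my_dev *mydev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(mydev->irq);
	return 0;
}
#endif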
/*
 * Please avoid allocating new softirqs unless you really need _really_
 * high frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough; e.g. all serial device BHs et
 * al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
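/*
 * Example (illustrative sketch only): how a core subsystem wires up and
 * raises a softirq, mirroring the way kernel/softirq.c registers
 * tasklet_action() for TASKLET_SOFTIRQ. "my_softirq_action" and
 * "my_subsys_init" are hypothetical; drivers should not do this.
 */
#if 0	/* usage sketch, not compiled */
static void my_softirq_action(struct softirq_action *a)
{
	/* Runs in softirq context: no sleeping allowed. */
}

static int __init my_subsys_init(void)
{
	/* Register the handler for one of the fixed NR_SOFTIRQS slots. */
	open_softirq(TASKLET_SOFTIRQ, my_softirq_action);
	return 0;
}

/* Later, typically from irq context, mark it pending: */
/*	raise_softirq(TASKLET_SOFTIRQ);	*/
#endif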
/* Tasklets --- multithreaded analogue of BHs.

   The main feature differing them from generic softirqs: one tasklet
   runs on at most one CPU at a time.

   The main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     it can be done with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
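/*
 * Example (illustrative sketch only): initializing, scheduling and
 * killing a tasklet that does the deferred half of an interrupt.
 * "struct my_dev" and the my_dev_*() helpers are hypothetical; the
 * data cookie is passed back to the handler as its argument.
 */
#if 0	/* usage sketch, not compiled */
struct my_dev {
	struct tasklet_struct tasklet;
	unsigned int irq;
	/* ... device state ... */
};

static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *mydev = (struct my_dev *)data;

	/* Softirq context: do the deferred part of the irq work. */
	my_dev_process_events(mydev);
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *mydev = dev_id;

	my_dev_ack_irq(mydev);
	tasklet_schedule(&mydev->tasklet);	/* defer to softirq */
	return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *mydev)
{
	tasklet_init(&mydev->tasklet, my_tasklet_fn, (unsigned long)mydev);
}

static void my_dev_teardown(struct my_dev *mydev)
{
	tasklet_kill(&mydev->tasklet);	/* wait out a running instance */
}
#endif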
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		__attribute__((__section__(".irqentry.text")))
#define __softirq_entry	\
	__attribute__((__section__(".softirqentry.text")))

#endif /* _LINUX_INTERRUPT_H */