/* Public domain. */

#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <sys/task.h>

#include <machine/intr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/atomic.h>
#include <linux/irqreturn.h>

struct seq_file;

#define IRQF_SHARED		0x0001
#define IRQF_ONESHOT		0x0002
#define IRQF_NO_AUTOEN		0x0004

#define IRQF_TRIGGER_RISING	0x1000
#define IRQF_TRIGGER_FALLING	0x2000

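/*
 * Interrupt registration is stubbed out: request_irq() always reports
 * success, and the free/disable/enable helpers below do nothing.
 */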
#define request_irq(irq, hdlr, flags, name, dev)	(0)

static inline void
free_irq(unsigned int irq, void *dev)
{
}

static inline void
disable_irq(u_int irq)
{
}

static inline void
enable_irq(u_int irq)
{
}

typedef irqreturn_t (*irq_handler_t)(int, void *);

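/* Stub: reports success without installing handler or thread_fn. */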
static inline int
devm_request_threaded_irq(struct device *dev, u_int irq, irq_handler_t handler,
    irq_handler_t thread_fn, u_int irqflags, const char *devname, void *arg)
{
	return 0;
}

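/*
 * Tasklets are emulated with the task/taskq API: each tasklet embeds a
 * struct task that tasklet_schedule() queues on taskletq, where it is
 * dispatched through tasklet_run().
 */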
struct tasklet_struct {
	union {
		void (*func)(unsigned long);
		void (*callback)(struct tasklet_struct *);
	};
	bool use_callback;
	unsigned long data;
	unsigned long state;
	atomic_t count;
	struct task task;
};

#define TASKLET_STATE_SCHED	1
#define TASKLET_STATE_RUN	0

#define from_tasklet(x, t, f) \
	container_of(t, typeof(*x), f)

extern struct taskq *taskletq;
void tasklet_run(void *);
void tasklet_unlock_wait(struct tasklet_struct *);
void tasklet_unlock_spin_wait(struct tasklet_struct *);

static inline void
tasklet_init(struct tasklet_struct *ts, void (*func)(unsigned long),
    unsigned long data)
{
	ts->func = func;
	ts->data = data;
	ts->state = 0;
	atomic_set(&ts->count, 0);
	ts->use_callback = false;
	task_set(&ts->task, tasklet_run, ts);
}

static inline void
tasklet_setup(struct tasklet_struct *ts,
    void (*callback)(struct tasklet_struct *))
{
	ts->callback = callback;
	ts->data = 0;
	ts->state = 0;
	atomic_set(&ts->count, 0);
	ts->use_callback = true;
	task_set(&ts->task, tasklet_run, ts);
}

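/*
 * TASKLET_STATE_RUN acts as a run lock: tasklet_trylock() claims it,
 * tasklet_unlock() releases it, and tasklet_unlock_wait() (defined
 * elsewhere) waits for it to clear.
 */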
static inline int
tasklet_trylock(struct tasklet_struct *ts)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &ts->state);
}

static inline void
tasklet_unlock(struct tasklet_struct *ts)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &ts->state);
}

static inline void
tasklet_kill(struct tasklet_struct *ts)
{
	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	task_del(taskletq, &ts->task);
	tasklet_unlock_wait(ts);
}

static inline void
tasklet_schedule(struct tasklet_struct *ts)
{
	set_bit(TASKLET_STATE_SCHED, &ts->state);
	task_add(taskletq, &ts->task);
}

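/* No separate high-priority queue; identical to tasklet_schedule(). */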
static inline void
tasklet_hi_schedule(struct tasklet_struct *ts)
{
	set_bit(TASKLET_STATE_SCHED, &ts->state);
	task_add(taskletq, &ts->task);
}

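/*
 * Disable/enable nest via the count field; tasklet_disable_nosync() does
 * not wait for a running tasklet to finish.
 */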
static inline void
tasklet_disable_nosync(struct tasklet_struct *ts)
{
	atomic_inc(&ts->count);
	smp_mb__after_atomic();
}

static inline void
tasklet_enable(struct tasklet_struct *ts)
{
	smp_mb__before_atomic();
	atomic_dec(&ts->count);
}

#endif