1 /*
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6 * Copyright (c) 2014-2020 François Tigeot <ftigeot@wolfpond.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30 #ifndef _LINUX_WORKQUEUE_H_
31 #define _LINUX_WORKQUEUE_H_
32
33 #include <linux/timer.h>
34 #include <linux/bitops.h>
35 #include <linux/lockdep.h>
36 #include <linux/atomic.h>
37 #include <linux/cpumask.h>
38
/*
 * Subset of the Linux WQ_* workqueue-creation flags understood by
 * _create_workqueue_common().  Other Linux flags are not supported.
 */
#define WQ_HIGHPRI 1
#define WQ_UNBOUND 2

struct workqueue_worker;	/* defined below */
43
/*
 * A single deferred-work item.  Embed this in a driver structure and
 * initialize it with INIT_WORK() before queueing.
 */
struct work_struct {
	STAILQ_ENTRY(work_struct) ws_entries;	/* linkage on a worker's ws_list */
	void	(*func)(struct work_struct *);	/* callback run by the worker thread */
	struct workqueue_worker *worker;	/* worker currently associated with this item */
	bool	on_queue;	/* item is linked on a worker's list */
	bool	running;	/* func is currently executing */
	bool	canceled;	/* item was canceled; NOTE(review): exact interplay
				 * with on_queue/running lives in the .c file */
};
52
/*
 * Per-thread worker state: a FIFO of pending work items serviced by one
 * kernel thread, protected by worker_lock.
 */
struct workqueue_worker {
	STAILQ_HEAD(ws_list, work_struct) ws_list_head;	/* pending work, FIFO order */
	struct thread *worker_thread;	/* kthread draining ws_list_head */
	struct lock worker_lock;	/* protects ws_list_head and queued items */
};
58
/*
 * A workqueue: an array of workers plus drain state.
 */
struct workqueue_struct {
	bool is_draining;	/* set by drain_workqueue(); presumably blocks new
				 * submissions — confirm against the .c file */
	int num_workers;	/* number of elements in *workers */
	struct workqueue_worker	(*workers)[];	/* pointer to array of num_workers workers */
};
64
/*
 * A work item that fires after a delay: a plain work_struct armed by a
 * callout timer.  Initialize with INIT_DELAYED_WORK().
 */
struct delayed_work {
	struct work_struct work;	/* must stay first for to_delayed_work() */
	struct callout timer;		/* fires to queue .work after the delay */
};
69
/*
 * to_delayed_work - map an embedded work_struct back to its delayed_work
 *
 * Only valid for a work_struct that is the .work member of a
 * struct delayed_work.
 */
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}
76
/*
 * INIT_WORK - initialize a work_struct with callback _func.
 * Clears queue linkage and all state flags; does not touch work->worker.
 * Must be called before the item is first queued.
 */
#define INIT_WORK(work, _func) 				\
do {							\
	(work)->ws_entries.stqe_next = NULL;		\
	(work)->func = (_func);				\
	(work)->on_queue = false;			\
	(work)->running = false;			\
	(work)->canceled = false;			\
} while (0)
85
/* On-stack items need no special setup here; same as INIT_WORK. */
#define INIT_WORK_ONSTACK(work, _func)	INIT_WORK(work, _func)
87
/*
 * INIT_DELAYED_WORK - initialize the embedded work item and its callout
 * timer (MPSAFE callout via callout_init_mp()).
 */
#define INIT_DELAYED_WORK(_work, _func)		\
do {						\
	INIT_WORK(&(_work)->work, _func);	\
	callout_init_mp(&(_work)->timer);	\
} while (0)
93
/* On-stack variant; identical to INIT_DELAYED_WORK. */
#define INIT_DELAYED_WORK_ONSTACK(work, _func)	INIT_DELAYED_WORK(work, _func)
95
/*
 * System-wide workqueues, mirroring the Linux globals of the same names.
 * NOTE(review): whether the non-default queues alias distinct thread pools
 * is decided in the implementation file — confirm there before relying on
 * priority/latency differences between them.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_power_efficient_wq;
102
/*
 * Workqueue creation.  All variants funnel into _create_workqueue_common().
 * Note that alloc_workqueue() discards max_active, and both the ordered
 * and singlethread variants are approximated by forcing WQ_UNBOUND.
 */
#define alloc_ordered_workqueue(name, flags)	\
	_create_workqueue_common(name, (flags) | WQ_UNBOUND)

#define alloc_workqueue(name, flags, max_active)	\
	_create_workqueue_common(name, flags)

#define create_singlethread_workqueue(name)	\
	_create_workqueue_common(name, WQ_UNBOUND)

/* Allocate and start a workqueue; NULL-on-failure semantics — confirm in .c */
struct workqueue_struct *_create_workqueue_common(const char *name, int flags);
113
/*
 * Submit work to a specific workqueue.  NOTE(review): return value is
 * presumably nonzero when the item was newly queued (Linux returns false
 * if it was already pending) — confirm against the implementation.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
		       unsigned long delay);
117
118 static inline bool
schedule_work(struct work_struct * work)119 schedule_work(struct work_struct *work)
120 {
121 return queue_work(system_wq, work);
122 }
123
schedule_delayed_work(struct delayed_work * dwork,unsigned long delay)124 static inline bool schedule_delayed_work(struct delayed_work *dwork,
125 unsigned long delay)
126 {
127 return queue_delayed_work(system_wq, dwork, delay);
128 }
129
/*
 * Cancellation.  The _sync variants wait for a currently-running callback
 * to finish before returning; cancel_delayed_work() does not.
 * NOTE(review): return-value semantics (was the item pending?) mirror
 * Linux — confirm against the implementation.
 */
bool cancel_work_sync(struct work_struct *work);
bool cancel_delayed_work(struct delayed_work *dwork);
bool cancel_delayed_work_sync(struct delayed_work *dwork);
133
/*
 * mod_delayed_work - reset the timeout of a delayed work item
 * @wq:    workqueue to (re)queue on
 * @dwork: the delayed work item
 * @delay: new delay before execution
 *
 * Cancels any pending timer for @dwork and queues it again with @delay.
 * Returns the result of the cancellation, i.e. whether a pending item was
 * modified rather than freshly queued — matching the Linux contract.
 * (The previous implementation always returned false; drm callers ignore
 * the value, so this is backward-safe.)
 */
static inline bool
mod_delayed_work(struct workqueue_struct *wq,
		 struct delayed_work *dwork, unsigned long delay)
{
	bool was_pending;

	was_pending = cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);

	return was_pending;
}
143
/*
 * Flushing.  flush_* waits for already-queued work to complete;
 * drain_workqueue() additionally stops new work from being queued while
 * draining (see is_draining).
 */
void drain_workqueue(struct workqueue_struct *wq);
void flush_workqueue(struct workqueue_struct *wq);

bool flush_work(struct work_struct *work);
bool flush_delayed_work(struct delayed_work *dwork);
149
/*
 * flush_scheduled_work - wait for all work queued on the default system
 * workqueue to complete.
 */
static inline void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
155
/* Status queries: busy (queued or running) vs. merely pending. */
unsigned int work_busy(struct work_struct *work);
bool work_pending(struct work_struct *work);

bool delayed_work_pending(struct delayed_work *dw);

/* Tear down a workqueue created by one of the alloc_* macros above. */
void destroy_workqueue(struct workqueue_struct *wq);

/* On-stack teardown hooks (may be no-ops here — confirm in the .c file). */
void destroy_work_on_stack(struct work_struct *work);

void destroy_delayed_work_on_stack(struct delayed_work *work);
166
167 #endif /* _LINUX_WORKQUEUE_H_ */
168