/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2014-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <sys/taskqueue.h>

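/*
 * Linux workqueue emulation for the DragonFly drm compatibility layer.
 *
 * A Linux workqueue is modeled on top of a DragonFly taskqueue(9): each
 * work item wraps a struct task, and delayed work pairs a task with a
 * callout(9) timer.  The emulation is approximate; the notes below point
 * out known divergences from the Linux semantics.
 */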
struct workqueue_struct {
	struct taskqueue	*taskqueue;
	struct lock		flags_lock;
	bool			is_draining;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*func)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

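/*
 * Recover the delayed_work containing a given work_struct.  Only valid
 * for work items embedded in a struct delayed_work (i.e. initialized
 * via INIT_DELAYED_WORK()).
 */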
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->func(work);
}

#define	INIT_WORK(work, _func)						\
do {									\
	(work)->func = (_func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define	INIT_WORK_ONSTACK(work, _func)	INIT_WORK(work, _func)
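
/*
 * Example usage (sketch): a driver would typically embed a work item in
 * its private structure and initialize it once at attach time.  The
 * names below (my_softc, my_hotplug_fn) are illustrative only:
 *
 *	struct my_softc {
 *		struct work_struct	hotplug_work;
 *	};
 *
 *	INIT_WORK(&sc->hotplug_work, my_hotplug_fn);
 *	schedule_work(&sc->hotplug_work);
 */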

#define	INIT_DELAYED_WORK(_work, _func)					\
do {									\
	INIT_WORK(&(_work)->work, _func);				\
	callout_init_mp(&(_work)->timer);				\
} while (0)

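/*
 * Deferrable work is not emulated separately: Linux's timer-batching
 * behaviour is dropped and a deferrable work item behaves exactly like
 * ordinary delayed work.
 */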
#define	INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

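/*
 * schedule_work() and flush_scheduled_work() stand in for Linux's
 * system workqueue by using the kernel thread taskqueue of the current
 * cpu.
 */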
#define	schedule_work(work)						\
do {									\
	taskqueue_enqueue_optq(taskqueue_thread[mycpuid],		\
	    &(work)->taskqueue, &(work)->work_task);			\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])

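/*
 * Linux's queue_work() returns true only when the work item was not
 * already pending.  The taskqueue enqueue is assumed to return zero in
 * that case, hence the inverted result.
 */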
static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	/* Return opposite val to align with Linux logic */
	return !taskqueue_enqueue_optq((q)->taskqueue, &(work)->taskqueue,
	    &(work)->work_task);
}

static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue_optq(work->work.taskqueue, &work->work.taskqueue,
	    &work->work.work_task);
}

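/*
 * Schedule delayed work on a specific workqueue.  The delay is in
 * ticks (callout_reset(9) units); Linux callers pass jiffies, which
 * this compatibility layer is assumed to map to ticks.  As in Linux,
 * the return value is false if the work was already pending, and a
 * zero delay enqueues the work immediately.
 */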
static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	} else {
		_delayed_work_fn((void *)work);
	}

	return (!pending);
}

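/*
 * The temporary workqueue_struct on the stack is safe here:
 * queue_delayed_work() only copies its taskqueue pointer into the work
 * item and keeps no reference to the wq itself.
 */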
static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}

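/*
 * All of the Linux workqueue creation variants funnel into
 * _create_workqueue_common(), which only honours a thread count:
 * ordered and singlethreaded queues get one thread, and the WQ_*
 * flags are ignored.
 */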
struct workqueue_struct *_create_workqueue_common(char *name, int cpus);

#define	create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	_create_workqueue_common(name, max_active)

void destroy_workqueue(struct workqueue_struct *wq);

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

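/*
 * Flushing is implemented by queueing a no-op marker task and draining
 * it: once the marker has run, every task enqueued before it has
 * completed.
 */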
static inline void
_flush_fn(void *context, int pending)
{
	/* No-op: this task only serves as a flush marker. */
}

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
}

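/*
 * Unlike Linux's cancel_work_sync(), which reports whether the work
 * was pending, this version always returns 0.
 */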
static inline int
cancel_work_sync(struct work_struct *work)
{
	if (taskqueue_cancel_simple(&work->work_task))
		taskqueue_drain_simple(&work->work_task);
	return 0;
}

/*
 * Like Linux's cancel_delayed_work(), this may return while the work
 * is still running on another cpu.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{
	callout_stop(&work->timer);
	return (taskqueue_cancel_simple(&work->work.work_task) == 0);
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{
	callout_cancel(&work->timer);
	if (taskqueue_cancel_simple(&work->work.work_task))
		taskqueue_drain_simple(&work->work.work_task);
	return 0;
}

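/*
 * Implemented as cancel-and-requeue rather than an atomic timer
 * modification, and always returns false, whereas Linux returns true
 * when the work was already pending.
 */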
static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return false;
}

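/*
 * Linux's flush_work() returns false when the work was already idle;
 * this version unconditionally reports true.
 */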
static inline bool
flush_work(struct work_struct *work)
{
	taskqueue_drain_simple(&work->work_task);

	return true;
}

static inline void
destroy_work_on_stack(struct work_struct *work)
{
	/* Nothing to free; on-stack work holds no extra resources. */
}

/* System-wide workqueues */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_unbound_wq;

static inline unsigned int
work_busy(struct work_struct *work)
{
	/* Just pretend nothing is busy; this function is unreliable anyway */
	return 0;
}

bool flush_delayed_work(struct delayed_work *dwork);

void drain_workqueue(struct workqueue_struct *wq);

#endif	/* _LINUX_WORKQUEUE_H_ */