xref: /dragonfly/sys/dev/drm/include/linux/workqueue.h (revision 7bcb6caf)
/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2014-2018 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <sys/taskqueue.h>

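/*
 * This header maps the Linux workqueue API onto DragonFly's taskqueue(9)
 * subsystem: a workqueue_struct wraps a taskqueue, a work_struct wraps a
 * task, and a delayed_work pairs a work_struct with a callout (plus an
 * lwkt token serializing callout operations).
 */
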
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct task		work_task;
	struct taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
	struct lwkt_token	token;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

/* taskqueue(9) callback: invoke the Linux-style work handler */
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->fn(work);
}
73 
74 #define	INIT_WORK(work, func) 	 					\
75 do {									\
76 	(work)->fn = (func);						\
77 	(work)->taskqueue = NULL;					\
78 	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
79 } while (0)
80 
81 #define INIT_WORK_ONSTACK(work, func)	INIT_WORK(work, func)
82 
83 #define INIT_DELAYED_WORK(_work, func)					\
84 do {									\
85 	INIT_WORK(&(_work)->work, func);				\
86 	lwkt_token_init(&(_work)->token, "workqueue token");		\
87 	callout_init_mp(&(_work)->timer);				\
88 } while (0)
89 
90 #define	INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK
91 
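/*
 * Example usage (a sketch; my_work_cb and my_softc are hypothetical
 * driver-side names):
 *
 *	static void
 *	my_work_cb(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, work);
 *		...
 *	}
 *
 *	INIT_WORK(&sc->work, my_work_cb);
 *	schedule_work(&sc->work);
 */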
#define	schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread[mycpuid];			\
	taskqueue_enqueue(taskqueue_thread[mycpuid],			\
	    &(work)->work_task);					\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])

static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	work->taskqueue = q->taskqueue;
	/* taskqueue_enqueue() returns 0 on success; Linux wants true */
	return !taskqueue_enqueue(q->taskqueue, &work->work_task);
}
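
/*
 * schedule_work() above targets the current CPU's system taskqueue;
 * queue_work() targets a queue the caller created, e.g. (sketch, with a
 * hypothetical sc->wq obtained from alloc_workqueue()):
 *
 *	queue_work(sc->wq, &sc->work);
 */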

/* callout(9) handler: hand the delayed work off to its taskqueue */
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		lwkt_gettoken(&work->token);
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
		lwkt_reltoken(&work->token);
	} else {
		_delayed_work_fn(work);
	}

	/* As on Linux: return false if the work was already pending */
	return (!pending);
}
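
/*
 * Note: delay is passed straight to callout_reset(), so it is in ticks;
 * in this compatibility layer jiffies are ticks, so values from
 * msecs_to_jiffies() should work unchanged (sketch):
 *
 *	queue_delayed_work(sc->wq, &sc->dwork, msecs_to_jiffies(100));
 */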

static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	/* safe on the stack: queue_delayed_work() only copies the pointer */
	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}

struct workqueue_struct *_create_workqueue_common(char *name, int cpus);

#define	create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	_create_workqueue_common(name, max_active)

void destroy_workqueue(struct workqueue_struct *wq);
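
/*
 * Example (sketch):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("mydrv", 0);
 *	...
 *	destroy_workqueue(wq);
 */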

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

static inline void
_flush_fn(void *context, int pending)
{
}

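/*
 * Flush by enqueueing a no-op marker task (_flush_fn) and draining it:
 * once the marker has run, every task enqueued before it has completed.
 */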
static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_stop(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_drain(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}
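
/*
 * Typical teardown sequence (sketch): stop the timer and any queued or
 * running work first, then tear down the queue:
 *
 *	cancel_delayed_work_sync(&sc->dwork);
 *	destroy_workqueue(sc->wq);
 */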

static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	bool pending;

	/* as on Linux: report whether pending work was rescheduled */
	pending = cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return pending;
}

static inline bool
flush_work(struct work_struct *work)
{
	if (work->taskqueue != NULL)
		taskqueue_drain(work->taskqueue, &work->work_task);
	return true;
}

/* no-op: INIT_WORK_ONSTACK() allocates nothing that needs releasing */
static inline void
destroy_work_on_stack(struct work_struct *work)
{
}

/* System-wide workqueues */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_unbound_wq;

static inline unsigned int
work_busy(struct work_struct *work)
{
	/* Just pretend nothing is busy; this function is unreliable anyway */
	return 0;
}

#endif	/* _LINUX_WORKQUEUE_H_ */