/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2014-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <sys/taskqueue.h>

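/*
 * Linux workqueue API implemented on top of DragonFly's taskqueue(9):
 * a workqueue wraps a taskqueue, a work item wraps a task, and a
 * delayed work item adds a callout that enqueues the task when it
 * fires.
 */
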
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*func)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}
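/*
 * Taskqueue(9) callback shim: recover the work item from the task
 * context and invoke its Linux-style work function.
 */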
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->func(work);
}

#define	INIT_WORK(work, _func)						\
do {									\
	(work)->func = (_func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define	INIT_WORK_ONSTACK(work, _func)	INIT_WORK(work, _func)

#define	INIT_DELAYED_WORK(_work, _func)					\
do {									\
	INIT_WORK(&(_work)->work, _func);				\
	callout_init_mp(&(_work)->timer);				\
} while (0)

#define	INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

#define	schedule_work(work)						\
do {									\
	taskqueue_enqueue_optq(taskqueue_thread[mycpuid],		\
	    &(work)->taskqueue, &(work)->work_task);			\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])

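/*
 * Typical usage, as a sketch; the softc and work function below are
 * hypothetical, not part of this header:
 *
 *	struct my_softc {
 *		struct work_struct work;
 *	};
 *
 *	static void
 *	my_work_fn(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, work);
 *		... do the deferred processing with sc ...
 *	}
 *
 *	INIT_WORK(&sc->work, my_work_fn);
 *	schedule_work(&sc->work);
 */
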
static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	/*
	 * taskqueue_enqueue_optq() and Linux queue_work() use opposite
	 * return conventions; invert so that, as on Linux, a non-zero
	 * result means the work was newly queued.
	 */
	return !taskqueue_enqueue_optq(q->taskqueue, &work->taskqueue,
	    &work->work_task);
}
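/*
 * Callout handler: the delay has expired, so hand the work item to
 * the taskqueue it was bound to by queue_delayed_work().
 */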
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue_optq(work->work.taskqueue, &work->work.taskqueue,
	    &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	} else {
		_delayed_work_fn((void *)work);
	}

	/* As on Linux, return true only if the work was not already pending */
	return (!pending);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}
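
/*
 * Delayed work, as a sketch; `sc' and `my_timeout_fn' are
 * hypothetical, and the delay goes straight to callout_reset(9),
 * so it is in ticks:
 *
 *	INIT_DELAYED_WORK(&sc->dwork, my_timeout_fn);
 *	queue_delayed_work(wq, &sc->dwork, hz / 10);
 *	...
 *	cancel_delayed_work_sync(&sc->dwork);
 */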

struct workqueue_struct *_create_workqueue_common(char *name, int cpus);

#define	create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	_create_workqueue_common(name, max_active)

void destroy_workqueue(struct workqueue_struct *wq);

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

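/*
 * Workqueue lifecycle, as a sketch (the name "drmwq" is arbitrary):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("drmwq", 0);
 *	queue_work(wq, &sc->work);
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);
 */
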
static inline void
_flush_fn(void *context, int pending)
{
}

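/*
 * Flush by enqueueing a no-op marker task and draining it; the caller
 * sleeps until the queue has processed everything up to the marker.
 */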
static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
	if (taskqueue_cancel_simple(&work->work_task))
		taskqueue_drain_simple(&work->work_task);
	return 0;	/* XXX Linux reports whether the work was pending */
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{
	callout_stop(&work->timer);
	return (taskqueue_cancel_simple(&work->work.work_task) == 0);
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{
	callout_cancel(&work->timer);
	if (taskqueue_cancel_simple(&work->work.work_task))
		taskqueue_drain_simple(&work->work.work_task);
	return 0;
}

static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	/* XXX Linux returns true if dwork was pending before the call */
	return false;
}

static inline bool
flush_work(struct work_struct *work)
{
	taskqueue_drain_simple(&work->work_task);

	return true;
}

static inline void
destroy_work_on_stack(struct work_struct *work)
{
}

/* System-wide workqueues */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_unbound_wq;

static inline unsigned int
work_busy(struct work_struct *work)
{
	/*
	 * Pretend nothing is busy; even on Linux the return value of
	 * work_busy() is only advisory.
	 */
	return 0;
}

bool flush_delayed_work(struct delayed_work *dwork);

#endif	/* _LINUX_WORKQUEUE_H_ */
236