/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2014 François Tigeot
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <sys/types.h>
#include <sys/malloc.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>

#include <sys/taskqueue.h>

/*
 * Linux workqueues are emulated with DragonFly taskqueues: a
 * workqueue_struct wraps a taskqueue, a work_struct wraps a task plus
 * its Linux-style handler, and a delayed_work adds a callout that
 * defers the enqueue.
 */
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
	struct lwkt_token	token;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

/*
 * Taskqueue callback: unwrap the work_struct and invoke its Linux-style
 * handler.  The taskqueue pending count is unused.
 */
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->fn(work);
}

#define	INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define	INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	lwkt_token_init(&(_work)->token, "workqueue token");		\
	callout_init_mp(&(_work)->timer);				\
} while (0)

/*
 * Deferrable work is not distinguished here; it is initialized exactly
 * like regular delayed work.
 */
#define	INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

#define	schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread[mycpuid];			\
	taskqueue_enqueue(taskqueue_thread[mycpuid],			\
	    &(work)->work_task);					\
} while (0)
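
/*
 * Example (sketch, not part of this header): a minimal work item on the
 * current CPU's system queue.  "my_softc", "irq_work" and the handler
 * name are illustrative only.
 *
 *	struct my_softc {
 *		struct work_struct	irq_work;
 *	};
 *
 *	static void
 *	my_irq_work(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, irq_work);
 *		... process sc ...
 *	}
 *
 *	INIT_WORK(&sc->irq_work, my_irq_work);
 *	schedule_work(&sc->irq_work);
 */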

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])

#define	queue_work(q, work)						\
do {									\
	(work)->taskqueue = (q)->taskqueue;				\
	taskqueue_enqueue((q)->taskqueue, &(work)->work_task);		\
} while (0)

/*
 * Callout handler: the delay has expired, hand the work over to its
 * taskqueue.
 */
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	/* Snapshot the pending state before (re)arming the callout. */
	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		lwkt_gettoken(&work->token);
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
		lwkt_reltoken(&work->token);
	} else {
		/* No delay: enqueue immediately, bypassing the callout. */
		_delayed_work_fn(work);
	}

	return (!pending);
}
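
/*
 * Example (sketch): deferring work by roughly one second.  The delay is
 * handed straight to callout_reset(), so it is measured in system ticks;
 * Linux callers pass jiffies, which this compat layer assumes tick at
 * the same rate (hz).  "my_wq", "my_dwork" and "my_handler" are
 * illustrative names.
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_handler);
 *	queue_delayed_work(my_wq, &my_dwork, hz);
 */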

static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	/*
	 * The temporary wq only donates its taskqueue pointer, which
	 * queue_delayed_work() copies into the work item, so a stack
	 * object is safe here.
	 */
	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}

static inline struct workqueue_struct *
_create_workqueue_common(char *name, int cpus)
{
	struct workqueue_struct *wq;

	wq = kmalloc(sizeof(*wq), DRM_MEM_KMS, M_WAITOK);
	wq->taskqueue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &wq->taskqueue);
	taskqueue_start_threads(&wq->taskqueue, cpus, 0, -1, "%s", name);

	return (wq);
}

#define	create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)
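
/*
 * Example (sketch): a driver-private queue.  All names below are
 * illustrative.  alloc_ordered_workqueue() is emulated with a
 * single-threaded queue, which is what preserves its ordering guarantee;
 * the flags argument is ignored.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("mydrv");
 *	queue_work(wq, &sc->irq_work);
 *	...
 *	destroy_workqueue(wq);
 */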

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskqueue_free(wq->taskqueue);
	kfree(wq, DRM_MEM_KMS);
}

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

static inline void
_flush_fn(void *context, int pending)
{
	/* No-op marker task used by flush_taskqueue(). */
}

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	/*
	 * Enqueue a no-op marker and drain it: once the marker has run,
	 * every task queued before it has been picked up (and, on a
	 * single-threaded queue, completed).  Hold the current process
	 * so it stays around while we block.
	 */
	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}
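
/*
 * Example (sketch, illustrative names): using the flush to synchronize
 * with previously scheduled work.
 *
 *	schedule_work(&sc->reset_work);
 *	...
 *	flush_scheduled_work();	-- returns only after reset_work has run
 */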

/*
 * De-queue the task if it is still pending; if it is already running,
 * drain (wait for) it instead.  Always returns 0, unlike Linux, which
 * reports whether the work was pending.
 */
static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * Stop the timer and de-queue the task if possible.  As on Linux, this
 * may leave the work handler running on another CPU.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_stop(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

/*
 * Synchronous variant: drain the callout, then cancel or drain the task,
 * so no handler is left running on return.
 */
static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_drain(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}
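
/*
 * Example (sketch, illustrative names): safe teardown order for a driver
 * using both kinds of work.  Cancel delayed work first so its callout
 * cannot re-arm an enqueue, then tear the queue down.
 *
 *	cancel_delayed_work_sync(&sc->poll_work);
 *	cancel_work_sync(&sc->irq_work);
 *	destroy_workqueue(sc->wq);
 */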

#endif	/* _LINUX_WORKQUEUE_H_ */